main.py
|
import OWeather, modbusslave
import pandas as pd
import time
import threading
startflag = 0
# Start the Modbus slave
modbusThread = threading.Thread(target=modbusslave.main)
modbusThread.start()
modbusThread.join(1)
# Get the list of cities to query, and their Modbus addresses
def getTarCityList():
TCL = r'tarcitylist.csv'
data = pd.read_csv(TCL, engine='python')
data = data.dropna()
TarCityList = {}
for name, address in zip(data['地名'], data['地址']):
TarCityList[name] = str(address)
return TarCityList
# Get the full city list and the corresponding weather-forecast codes
def getCityList():
CL = r'citylist.csv'
data = pd.read_csv(CL, engine='python')
data = data.dropna()
CityList = {}
for name, code in zip(data['地名'], data['代码']):
CityList[name] = code
return CityList
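# Both CSV files are read with pandas above; the column names come straight from the
# code, while the sample rows below are only an illustration of the assumed layout:
#
#   tarcitylist.csv: 地名,地址   (city name, Modbus slave address)
#   citylist.csv:    地名,代码   (city name, weather-forecast code)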
# Thread body that writes the real-time weather
def RT():
for name in cities:
cities[name].getRTData()
cities[name].setRTData()
time.sleep(2)
# Thread body that writes the 24-hour weather
def h24beforstart():
for name in cities:
cities[name].get24Data()
cities[name].set24Data()
time.sleep(2)
# At midnight, refresh the weather info and write it to the log file as a backup
def h24zeroclock():
for name in cities:
cities[name].get24Data()
time.sleep(2)
cities[name].set24Data()
# After the program restarts, restore today's 24-hour weather data from the log files
def restart():
for name in cities:
cities[name].getRTData()
cities[name].setRTData()
cities[name].get24Data()
cities[name].set24Data()
time.sleep(2)
# A class instance is created for each city
class City:
def __init__(self, CityName, CityCode, slave):
self.CityName = CityName
self.CityCode = CityCode
self.CitySlave = int(slave)
def getRTData(self):
self.CityRTWeather = OWeather.getRTWeather(self.CityCode)
def get24Data(self):
self.City24Weather = OWeather.get24Weather(self.CityCode)
print(self.City24Weather)
try:
hour24temp = open('log\\%shour24temp.log' % (self.CityName), 'r')
temp = hour24temp.readlines()
temp = list(map(int, temp))
hour24temp.close()
except (OSError, ValueError):  # no log file yet, or it contains malformed lines
temp = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
print('Failed to restore the 24h data')
hourflag = self.City24Weather[0][0]
i = 0
if 0 <= hourflag < 24:
while i < (24-hourflag):
temp[0 + hourflag+i] = self.City24Weather[0][i]
temp[24 + hourflag+i] = self.City24Weather[1][i]
temp[48 + hourflag+i] = self.City24Weather[2][i]
temp[72 + hourflag+i] = self.City24Weather[3][i]
i += 1
tempwrite = ''
for item in temp:
tempwrite = tempwrite + str(item) + '\n'
hour24temp = open('log\\%shour24temp.log' % (self.CityName), 'w')
hour24temp.write(tempwrite)
hour24temp.close()
def setRTData(self):
modbusslave.setRTData(self.CityName, self.CitySlave, self.CityRTWeather)
def set24Data(self):
hour24temp = open('log\\%shour24temp.log' % (self.CityName), 'r')
temp = hour24temp.readlines()
sortout = [[], [], [], []]
j = 0
for item in sortout:
i = 0
while i < 24:
item.append(int(temp[j]))
i += 1
j += 1
hour24temp.close()
modbusslave.set24Data(self.CityName, self.CitySlave, sortout)
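# Layout of log\<CityName>hour24temp.log as implied by get24Data/set24Data above:
# 96 lines in total -- lines 0-23 hold the forecast hours, and lines 24-47, 48-71
# and 72-95 hold the three matching 24-value series returned by OWeather.get24Weather.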
# Get the list of cities to query
TarCityList = getTarCityList()
CityList = getCityList()
cities = {}
# Store the target cities in a dictionary
for name in TarCityList:
cities[name] = City(name, CityList[name], TarCityList[name])
restart()  # After a restart, recover the 24-hour data from the log files
while True:
# Refresh the real-time data at minute 31, second 08 of every hour
time_now1 = time.strftime("%M:%S", time.localtime())
if time_now1 == "31:08":
RTThread = threading.Thread(target=RT)
RTThread.start()
RTThread.join(1)
# The 24-hour forecast is updated every 2 hours
# Refresh the 24-hour data at 00:50:10
time_now3 = time.strftime("%H:%M:%S", time.localtime())
if time_now3 == "00:50:10":
h24Thread = threading.Thread(target=h24zeroclock)
h24Thread.start()
h24Thread.join(1)
# Refresh the 24-hour data at minute 40, second 10 every 2 hours
checktime = ["06:40:10", "08:40:10", "10:40:10", "12:40:10", "14:40:10",
"16:40:10", "18:40:10", "20:40:10", ]
if time_now3 in checktime:
h24Thread = threading.Thread(target=h24beforstart)
h24Thread.start()
h24Thread.join(1)
time.sleep(0.5)
|
test_menu.py
|
import signal
import threading
from queue import Queue
from django.conf import settings
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
from menu import Menu, MenuItem
# XXX TODO: test MENU_HIDE_EMPTY
class CustomMenuItem(MenuItem):
"""
Custom MenuItem subclass with custom check logic
"""
def check(self, request):
"""
We should be visible unless the request path ends with "foo"
"""
self.visible = not request.path.endswith("foo")
class MenuTests(TestCase):
"""
Tests for Menu
"""
def setUp(self):
"""
Build some menus for our tests
"""
self.kids3_2_desired_title = None
def kids3_2_title(request):
"Allow the title of kids3-2 to be changed"
if self.kids3_2_desired_title is not None:
return "-".join([request.path, self.kids3_2_desired_title])
return 'kids3-2'
def kids2_2_check(request):
"Hide kids2-2 whenever the request path ends with /hidden"
if request.path.endswith('/hidden'):
return False
return True
# Ensure we can pass children as tuples (or other iterables, like generators)
# Following the implementation of sorted children there was a bug reported due to children
# being passed as a tuple, which has no .sort method
# See: https://github.com/jazzband/django-simple-menu/issues/38
def kids2():
"Generator for kids2"
class RepeatIterator:
"We need this to be reusable -- http://stackoverflow.com/a/1985733"
def __iter__(self):
yield MenuItem("kids2-1", "/parent2/kids2-1", weight=999)
yield MenuItem("kids2-2", "/kids2-2", check=kids2_2_check)
return RepeatIterator()
def kids3_1(request):
"Callable for kids3-1"
return [
MenuItem("kids3-1-1", "/parent3/kids3-1/kid1", exact_url=True),
]
kids3 = (
CustomMenuItem("kids3-1", "/parent3/kids3-1", children=kids3_1, slug="salty"),
CustomMenuItem(kids3_2_title, "/parent3/kids3-2")
)
Menu.items = {}
Menu.sorted = {}
Menu.loaded = False
# add our items. because we set weight to 999 for parent 1 it will become the last child
# even though it's added first
Menu.add_item("test", MenuItem("Parent 1", "/parent1", weight=999))
Menu.add_item("test", MenuItem("Parent 2", "/parent2", children=kids2()))
Menu.add_item("test", MenuItem("Parent 3", "/parent3", children=kids3))
self.factory = RequestFactory()
def test_custom_menuitem(self):
"""
Ensure our custom check on our custom MenuItem works
"""
request = self.factory.get('/parent3/kids3-1')
items = Menu.process(request, 'test')
self.assertEqual(len(items[1].children), 2)
request = self.factory.get('/parent3/kids3-1/foo')
items = Menu.process(request, 'test')
self.assertEqual(len(items[1].children), 0)
def test_thread_safety_and_checks(self):
"""
Ensure our thread safety works, this also ensures our checks work
"""
# this shouldn't ever take more than 5 seconds, add a safety in case something breaks
signal.alarm(5)
def t1(results):
"Closure for thread 1"
request = self.factory.get('/kids2-2/visible')
items = Menu.process(request, 'test')
results.put_nowait(len(items[0].children) == 2)
def t2(results):
"Closure for thread 2"
request = self.factory.get('/kids2-2/hidden')
items = Menu.process(request, 'test')
results.put_nowait(len(items[0].children) == 1)
results = Queue()
for _ in range(50):
threads = [
threading.Thread(target=t1, args=(results,)),
threading.Thread(target=t2, args=(results,))
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertTrue(all([
results.get()
for _ in range(100)
]))
def test_slug(self):
"""
Ensure our slugification works as expected
"""
request = self.factory.get('/parent3/kids3-1')
items = Menu.process(request, 'test')
self.assertEqual(items[1].slug, "parent-3")
self.assertEqual(items[1].children[0].slug, "salty")
def test_exact_url(self):
"""
Ensure that the exact_url setting works
"""
# the extra stuff will still cause kids3-2 to be selected
request = self.factory.get('/parent3/kids3-2/extra_stuff_here')
items = Menu.process(request, 'test')
self.assertEqual(items[1].children[1].selected, True)
# but here it won't, because exact_url is set
request = self.factory.get('/parent3/kids3-1/kid1/extra_stuff_here')
items = Menu.process(request, 'test')
self.assertEqual(items[1].children[0].children[0].selected, False)
def test_callable_title(self):
"""
Ensure callable titles work
"""
self.kids3_2_desired_title = "fun"
request = self.factory.get('/parent3')
items = Menu.process(request, 'test')
self.assertEqual(items[1].children[1].title, "/parent3-fun")
def test_select_parents(self):
"""
Ensure the MENU_SELECT_PARENTS setting works
"""
settings.MENU_SELECT_PARENTS = False
request = self.factory.get('/parent2/kids2-1')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, True)
self.assertEqual(items[0].children[1].selected, True)
self.assertEqual(items[1].selected, False)
request = self.factory.get('/kids2-2')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, False)
self.assertEqual(items[0].children[0].selected, True)
self.assertEqual(items[1].selected, False)
settings.MENU_SELECT_PARENTS = True
request = self.factory.get('/kids2-2')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, True)
self.assertEqual(items[0].children[0].selected, True)
self.assertEqual(items[1].selected, False)
request = self.factory.get('/parent3/kids3-1/kid1')
items = Menu.process(request, 'test')
self.assertEqual(items[0].selected, False)
self.assertEqual(items[0].children[1].selected, False)
self.assertEqual(items[1].selected, True)
self.assertEqual(items[1].children[0].selected, True)
self.assertEqual(items[1].children[0].children[0].selected, True)
self.assertEqual(items[1].children[1].selected, False)
self.assertEqual(items[2].selected, False)
def test_template_tag(self):
"""
Ensure the templating works
"""
request = self.factory.get('/parent3/kids3-1')
out = Template(
"{% load menu %}"
"{% generate_menu %}"
"{% for item in menus.test %}"
"{{ item.title }},"
"{% for child in item.children %}"
"{{ child.title }},"
"{% for grandchild in child.children %}"
"{{ grandchild.title }},"
"{% endfor %}"
"{% endfor %}"
"{% endfor %}"
).render(Context({
'request': request,
}))
self.assertEqual(out, "Parent 2,kids2-2,kids2-1,Parent 3,kids3-1,kids3-1-1,kids3-2,Parent 1,")
def test_template_tag_missing_attribute(self):
"""
Missing attributes should not raise exceptions in templates
"""
request = self.factory.get('/parent2/kids2-1')
out = Template(
"{% load menu %}"
"{% generate_menu %}"
"{% for item in menus.test %}"
"{{ item.title }}{{ item.doesntexist }},"
"{% endfor %}"
).render(Context({
'request': request,
}))
self.assertEqual(out, "Parent 2,Parent 3,Parent 1,")
class MenuItemTests(TestCase):
"""
Tests for MenuItem
"""
def test_kwargs(self):
"""
MenuItems should accept arbitrary keyword args
"""
item = MenuItem("test", "/test", arbitrary=True, dictionary={'a': 1})
self.assertTrue(item.arbitrary)
self.assertEqual(item.dictionary, {'a': 1})
self.assertRaises(AttributeError, lambda: item.nope)
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import logging
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
import errno
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
logger = logging.getLogger('testrunner')
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.parallel_queue = Queue(len(cases))
self.sequential_queue = Queue(len(cases))
for case in cases:
if case.parallel:
self.parallel_queue.put_nowait(case)
else:
self.sequential_queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.lock = threading.Lock()
self.shutdown_event = threading.Event()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
threads.append(thread)
thread.start()
try:
self.RunSingle(False, 0)
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except (KeyboardInterrupt, SystemExit), e:
self.shutdown_event.set()
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.shutdown_event.set()
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self, parallel, thread_id):
while not self.shutdown_event.is_set():
try:
test = self.parallel_queue.get_nowait()
except Empty:
if parallel:
return
try:
test = self.sequential_queue.get_nowait()
except Empty:
return
case = test.case
case.thread_id = thread_id
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
case.duration = (datetime.now() - start)
except IOError, e:
return
if self.shutdown_event.is_set():
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def Starting(self):
logger.info('1..%i' % len(self.cases))
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
command = basename(output.command[-1])
if output.UnexpectedOutput():
logger.info('not ok %i - %s' % (self._done, command))
for l in output.output.stderr.splitlines():
logger.info('#' + l)
for l in output.output.stdout.splitlines():
logger.info('#' + l)
else:
logger.info('ok %i - %s' % (self._done, command))
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
logger.info(' ---')
logger.info(' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000))
logger.info(' ...')
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, arch, mode):
self.path = path
self.context = context
self.duration = None
self.arch = arch
self.mode = mode
self.parallel = False
self.thread_id = 0
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command, env):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode),
env)
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand(), {
"TEST_THREAD_ID": "%d" % self.thread_id
})
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
# full, it'll die with a EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
while True:
try:
os.unlink(name)
except OSError, e:
# On Windows unlink() fails if another process (typically a virus scanner
# or the indexing service) has the file open. Those processes keep a
# file open for a short time only, so yield and try again; it'll succeed.
if sys.platform == 'win32' and e.errno == errno.EACCES:
time.sleep(0)
continue
PrintError("os.unlink() " + str(e))
break
def Execute(args, context, timeout=None, env={}):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
# Extend environment
env_copy = os.environ.copy()
for key, value in env.iteritems():
env_copy[key] = value
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
env = env_copy
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, arch, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path,
arch, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, arch, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, arch, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'armv6' : { 'debug' : 12, 'release' : 3 }, # The ARM buildbots are slow.
'arm' : { 'debug' : 8, 'release' : 2 },
'ia32' : { 'debug' : 4, 'release' : 1 } }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, arch, mode):
if arch == 'none':
name = 'out/Debug/iojs' if mode == 'debug' else 'out/Release/iojs'
else:
name = 'out/%s.%s/iojs' % (arch, mode)
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/iojs.exe or Debug/iojs.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/iojs.exe')
else:
name = os.path.abspath('Release/iojs.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[ARCH_GUESS or 'ia32'][mode]
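# For example, with the default --timeout of 60 an ia32 debug run gets
# 60 * TIMEOUT_SCALEFACTOR['ia32']['debug'] = 240 seconds per test.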
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
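# Illustrative example: Tokenizer("$mode==debug && $arch").Tokenize() yields
# ['$', 'mode', '==', 'debug', '&&', '$', 'arch'].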
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
self.parallel = self.case.parallel
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
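# Illustrative snippet of a status file accepted by ReadConfigurationInto (the
# prefix, section condition and rule below are made-up examples of the syntax):
#
#   prefix simple
#   [ $mode == debug ]
#   test-fs-watch : PASS,SLOW
#   def pass_or_fail = PASS || FAIL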
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option('--logfile', dest='logfile',
help='write test output to file. NOTE: this only applies to the tap progress indicator')
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--special-command", default=None)
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("-J", help="Run tasks in parallel on all cores",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.arch = options.arch.split(',')
options.mode = options.mode.split(',')
if options.J:
options.j = multiprocessing.cpu_count()
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
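# For example, SplitPath("parallel/test-*") yields two Patterns, and the second
# one matches "test-fs-watch" because '*' is expanded to '.*' above.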
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
BUILT_IN_TESTS = [
'sequential',
'parallel',
'pummel',
'message',
'internet',
'addons',
'gc',
'debugger',
]
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
ch = logging.StreamHandler(sys.stdout)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
if options.logfile:
fh = logging.FileHandler(options.logfile)
logger.addHandler(fh)
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for arch in options.arch:
for mode in options.mode:
vm = context.GetVm(arch, mode)
if not exists(vm):
print "Can't find shell executable: '%s'" % vm
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': arch,
}
test_list = root.ListTests([], path, context, arch, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = (
config.ClassifyTests(test_list, env))
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = (
globally_unused_rules.intersection(unused_rules))
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 1
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
gso.py
|
from pgso.evaluate import error, evaluate, update_velocity, update_position
from multiprocessing import Manager, Process, Lock
from pgso.init_particles import create_n_particles
from numba import jit
import numpy as np
# @jit
def PSO_purana(costFunc,bounds,maxiter,swarm_init=None, log=False, the_list=None):
num_dimensions=len(swarm_init[0])
err_best_g=-1 # best error for group
pos_best_g=[] # best position for group
num_particles = len(swarm_init)
# establish the swarm
swarm = create_n_particles(num_particles, num_dimensions, swarm_init)
if log:
err_log_list = []
# begin optimization loop
i=0
while i < maxiter:
#print i,err_best_g
# cycle through particles in swarm and evaluate fitness
for j in range(0,num_particles):
swarm[j]['pos_best_i'], swarm[j]['err_best_i'] = evaluate(costFunc, swarm[j])
# determine if current particle is the best (globally)
if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
pos_best_g=list(swarm[j]['position_i'])
err_best_g=float(swarm[j]['err_i'])
# cycle through swarm and update velocities and position
for j in range(0,num_particles):
swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
swarm[j]['position_i'] = update_position(bounds, swarm[j])
i+=1
if log:
err_log_list.append(err_best_g)
if log:
the_list.append(err_log_list)
# print final results
#print ('\n')
#print (pos_best_g,' , ', err_best_g)
return pos_best_g[0], err_best_g
# @jit
def PSO(costFunc,bounds,maxiter,shared_list, return_list, l,num_particles=None,swarm_init=None, log=True, t_list=None):
num_dimensions=len(swarm_init[0])
err_best_g=-1 # best error for group
pos_best_g=[] # best position for group
num_particles = len(swarm_init)
# establish the swarm
swarm = create_n_particles(num_particles, num_dimensions, swarm_init)
# begin optimization loop
i=0
while i < maxiter:
#print i,err_best_g
# cycle through particles in swarm and evaluate fitness
for j in range(0,num_particles):
best_pos, swarm[j]['err_best_i'] = evaluate(costFunc, swarm[j])
swarm[j]['pos_best_i'] = best_pos
# determine if current particle is the best (globally)
if swarm[j]['err_i'] < err_best_g or err_best_g == -1:
pos_best_g=list(swarm[j]['position_i'])
err_best_g=float(swarm[j]['err_i'])
# update the global best in the manager list after k iterations
# we need to add some mutex lock here
if i == maxiter//2:
l.acquire()
best_galactic_pos = shared_list[0]
best_galactic_err = shared_list[1]
#print("best_galactic_err: " ,best_galactic_err)
#print("best_galactic_pos: ", best_galactic_pos)
if err_best_g < best_galactic_err and err_best_g != -1:
shared_list[1] = err_best_g
#print(err_best_g)
shared_list[0] = pos_best_g
else:
#print("changing pos_best_g from", pos_best_g, " to ", best_galactic_pos)
#emp_list = []
err_best_g = float(best_galactic_err)
#emp_list.append(best_galactic_pos)
pos_best_g = [best_galactic_pos]
l.release()
# cycle through swarm and update velocities and position
for j in range(0,num_particles):
swarm[j]['velocity_i'] = update_velocity(pos_best_g, swarm[j])
swarm[j]['position_i'] = update_position(bounds, swarm[j])
if log:
t_list.append(err_best_g)
i+=1
return_list.append(pos_best_g[0])
def start(process_list):
for p in process_list:
p.start()
def stop(process_list):
for p in process_list:
p.join()
# @jit
def GSO(M, bounds, num_particles, max_iter, costFunc, log=False, the_list=None):
"""
Galactic Swarm Optimization:
----------------------------
A meta-heuristic algorithm inspired by the interplay
of stars, galaxies and superclusters under the influence
of gravity.
Input:
------
M: integer
number of galaxies (each is optimized by a PSO subswarm in its own process)
bounds: list of (lower, upper) pairs
bounds of the search space, one pair per dimension
num_particles: integer
particles per subswarm
max_iter: integer
iterations for each PSO run
costFunc: callable
objective function to minimize
log, the_list: optional; when log is True, per-iteration best errors are collected into the_list
"""
subswarm_bests = []
dims = len(bounds)
lb = bounds[0][0]
ub = bounds[0][1]
manager = Manager()
l = Lock()
# Keep the galactic best in a managed list so updates made inside the worker
# processes are visible everywhere (re-assigning a plain Python list would break the sharing)
shared_list = manager.list([np.random.uniform(lb, ub, dims), 10000000])  # like np.inf
return_list = manager.list()
all_processes = []
list1 = manager.list()
list2 = manager.list()
list3 = manager.list()
list4 = manager.list()
list5 = manager.list()
for i in range(M):
#initial= np.random.uniform(-10,10, 2) # initial starting location [x1,x2...]
swarm_init = []
for _ in range(num_particles):
swarm_init.append(np.random.uniform(lb, ub, dims))
if log:
if i == 0:
p = Process(target=PSO, args=(costFunc, bounds, max_iter, shared_list, return_list, l, None, swarm_init, True, list1))
elif i == 1:
p = Process(target=PSO, args=(costFunc, bounds, max_iter, shared_list, return_list, l, None, swarm_init, True, list2))
elif i == 2:
p = Process(target=PSO, args=(costFunc, bounds, max_iter, shared_list, return_list, l, None, swarm_init, True, list3))
elif i == 3:
p = Process(target=PSO, args=(costFunc, bounds, max_iter, shared_list, return_list, l, None, swarm_init, True, list4))
elif i == 4:
p = Process(target=PSO, args=(costFunc, bounds, max_iter, shared_list, return_list, l, None, swarm_init, True, list5))
else:
p = Process(target=PSO, args=(costFunc, bounds, max_iter, shared_list, return_list, l, None,swarm_init))
all_processes.append(p)
start(all_processes)
stop(all_processes)
if log:
the_list.append(list1)
the_list.append(list2)
the_list.append(list3)
the_list.append(list4)
the_list.append(list5)
# print(return_list)
else:
the_list = None
log = False
return PSO_purana(error, bounds, max_iter, swarm_init=list(return_list), log=log, the_list=the_list)
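# Minimal usage sketch (the sphere cost function below is hypothetical; the project's
# own cost functions live alongside pgso.evaluate.error):
#
#   def sphere(x):
#       return float(np.sum(np.asarray(x) ** 2))
#
#   if __name__ == '__main__':
#       best_pos, best_err = GSO(M=4, bounds=[(-10, 10), (-10, 10)],
#                                num_particles=20, max_iter=50, costFunc=sphere)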
|
multiprocessuse.py
|
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import define
import multiprocessing
'''
The most straightforward approach: run one Tornado instance per process. The catch is
that each instance must bind to a different port, otherwise it raises an error.
'''
define('port', default=8000, type=int)
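# Note: the 'port' option above is defined but never read; each worker process
# created in __main__ binds its own port from range(8881, 8890), i.e. 8881-8889.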
class IndexHandler(tornado.web.RequestHandler):
def get(self):
quote = {'quote': (
"I've always been more interested in "
"the future than in the past."
),
'author': 'Grace Hopper'
}
respon_json = tornado.escape.json_encode(quote)
self.write(respon_json)
def startapp(port):
print(port)
app = tornado.web.Application(handlers=[(r"/", IndexHandler)])
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
processes = [multiprocessing.Process(target=startapp,args=(i,)) for i in range(8881,8890)]
for p in processes:
p.start()
for p in processes:
p.join()
|
constellation_exhibit.py
|
# Standard imports
import configparser
import datetime
import logging
import shutil
import threading
import time
import os
# Non-standard imports
import icmplib
import wakeonlan
# Constellation imports
import config
class ExhibitComponent:
"""Holds basic data about a component in the exhibit"""
def __init__(self, id_, this_type, category='dynamic'):
# category='dynamic' for components that are connected over the network
# category='static' for components added from currentExhibitConfiguration.ini
self.id = id_
self.type = this_type
self.category = category
self.ip = "" # IP address of client
self.helperPort = 8000 # port of the localhost helper for this component DEPRECATED
self.helperAddress = None # full IP and port of helper
self.macAddress = None # Added below if we have specified a Wake on LAN device
self.broadcastAddress = "255.255.255.255"
self.WOLPort = 9
self.last_contact_datetime = datetime.datetime.now()
self.lastInteractionDateTime = datetime.datetime(2020, 1, 1)
self.config = {"commands": [],
"allowed_actions": [],
"description": config.componentDescriptions.get(id_, ""),
"AnyDeskID": ""}
if category != "static":
self.update_configuration()
# Check if we have specified a Wake on LAN device matching this id
# If yes, subsume it into this component
wol = get_wake_on_LAN_component(self.id)
if wol is not None:
self.macAddress = wol.macAddress
if "power_on" not in self.config["allowed_actions"]:
self.config["allowed_actions"].append("power_on")
if "shutdown" not in self.config["allowed_actions"]:
self.config["allowed_actions"].append("power_off")
config.wakeOnLANList = [x for x in config.wakeOnLANList if x.id != wol.id]
def seconds_since_last_contact(self) -> float:
"""Return the number of seconds since a ping was received"""
diff = datetime.datetime.now() - self.last_contact_datetime
return diff.total_seconds()
def seconds_since_last_interaction(self) -> float:
"""Return the number of seconds since an interaction was recorded"""
diff = datetime.datetime.now() - self.lastInteractionDateTime
return diff.total_seconds()
def update_last_contact_datetime(self):
# We've received a new ping from this component, so update its
# last_contact_datetime
self.last_contact_datetime = datetime.datetime.now()
def update_last_interaction_datetime(self):
# We've received a new interaction ping, so update its
# lastInteractionDateTime
self.lastInteractionDateTime = datetime.datetime.now()
def current_status(self) -> str:
"""Return the current status of the component
Options: [OFFLINE, SYSTEM ON, ONLINE, ACTIVE, WAITING]
"""
if self.category == "static":
return "STATIC"
if self.seconds_since_last_contact() < 30:
if self.seconds_since_last_interaction() < 10:
status = "ACTIVE"
else:
status = "ONLINE"
elif self.seconds_since_last_contact() < 60:
status = "WAITING"
else:
# If we haven't heard from the component, we might still be able
# to ping the PC and see if it is alive
status = self.update_PC_status()
return status
def update_configuration(self):
"""Retrieve the latest configuration data from the configParser object"""
try:
file_config = dict(config.currentExhibitConfiguration.items(self.id))
for key in file_config:
if key == 'content':
self.config[key] = [s.strip() for s in file_config[key].split(",")]
elif key == "description":
pass # This is specified elsewhere
else:
self.config[key] = file_config[key]
except configparser.NoSectionError:
pass
# print(f"Warning: there is no configuration available for component with id={self.id}")
# with config.logLock:
# logging.warning(f"there is no configuration available for component with id={self.id}")
self.config["current_exhibit"] = config.currentExhibit[0:-8]
def queue_command(self, command):
"""Queue a command to be sent to the component on the next ping"""
if (command in ["power_on", "wakeDisplay"]) and (self.macAddress is not None):
self.wake_with_LAN()
else:
print(f"{self.id}: command queued: {command}")
self.config["commands"].append(command)
print(f"{self.id}: pending commands: {self.config['commands']}")
def wake_with_LAN(self):
# Function to send a magic packet waking the device
if self.macAddress is not None:
print(f"Sending wake on LAN packet to {self.id}")
with config.logLock:
logging.info(f"Sending wake on LAN packet to {self.id}")
try:
wakeonlan.send_magic_packet(self.macAddress,
ip_address=self.broadcastAddress,
port=self.WOLPort)
except ValueError as e:
print(f"Wake on LAN error for component {self.id}: {str(e)}")
with config.logLock:
logging.error(f"Wake on LAN error for component {self.id}: {str(e)}")
def update_PC_status(self):
"""If we have an IP address, ping the host to see if it is awake"""
status = "UNKNOWN"
if self.ip is not None:
try:
ping = icmplib.ping(self.ip, privileged=False, count=1, timeout=0.05)
if ping.is_alive:
status = "SYSTEM ON"
elif self.seconds_since_last_contact() > 60:
status = "OFFLINE"
else:
status = "WAITING"
except icmplib.exceptions.SocketPermissionError:
if "wakeOnLANPrivilege" not in config.serverWarningDict:
print(
"Warning: to check the status of Wake on LAN devices, you must run the control server with administrator privileges.")
with config.logLock:
logging.info(f"Need administrator privilege to check Wake on LAN status")
config.serverWarningDict["wakeOnLANPrivilege"] = True
return status
class WakeOnLANDevice:
"""Holds basic information about a wake on LAN device and facilitates waking it"""
def __init__(self, id_, mac_address, ip_address=None):
self.id = id_
self.type = "WAKE_ON_LAN"
self.macAddress = mac_address
self.broadcastAddress = "255.255.255.255"
self.port = 9
self.ip = ip_address
self.config = {"allowed_actions": ["power_on"],
"description": config.componentDescriptions.get(id_, "")}
self.state = {"status": "UNKNOWN"}
self.last_contact_datetime = datetime.datetime(2020, 1, 1)
def seconds_since_last_contact(self) -> float:
diff = datetime.datetime.now() - self.last_contact_datetime
return diff.total_seconds()
def queue_command(self, cmd):
"""Wrapper function to match other exhibit components"""
if cmd in ["power_on", "wakeDisplay"]:
self.wake()
def wake(self):
"""Function to send a magic packet waking the device"""
print(f"Sending wake on LAN packet to {self.id}")
with config.logLock:
logging.info(f"Sending wake on LAN packet to {self.id}")
try:
wakeonlan.send_magic_packet(self.macAddress,
ip_address=self.broadcastAddress,
port=self.port)
except ValueError as e:
print(f"Wake on LAN error for component {self.id}: {str(e)}")
with config.logLock:
logging.error(f"Wake on LAN error for component {self.id}: {str(e)}")
def update(self):
"""If we have an IP address, ping the host to see if it is awake"""
if self.ip is not None:
try:
ping = icmplib.ping(self.ip, privileged=False, count=1)
if ping.is_alive:
self.state["status"] = "SYSTEM ON"
self.last_contact_datetime = datetime.datetime.now()
elif self.seconds_since_last_contact() > 60:
self.state["status"] = "OFFLINE"
except icmplib.exceptions.SocketPermissionError:
if "wakeOnLANPrivilege" not in config.serverWarningDict:
print(
"Warning: to check the status of Wake on LAN devices, you must run the control server with administrator privileges.")
with config.logLock:
logging.info(f"Need administrator privilege to check Wake on LAN status")
config.serverWarningDict["wakeOnLANPrivilege"] = True
else:
self.state["status"] = "UNKNOWN"
def add_exhibit_component(this_id, this_type, category="dynamic") -> ExhibitComponent:
"""Create a new ExhibitComponent, add it to the config.componentList, and return it"""
component = ExhibitComponent(this_id, this_type, category)
config.componentList.append(component)
return component
def check_available_exhibits():
"""Get a list of available "*.exhibit" configuration files"""
config.exhibit_list = []
exhibits_path = os.path.join(config.APP_PATH, "exhibits")
with config.exhibitsLock:
for file in os.listdir(exhibits_path):
if file.lower().endswith(".exhibit"):
config.exhibit_list.append(file)
def command_all_exhibit_components(cmd):
"""Queue a command for every exhibit component"""
print("Sending command to all components:", cmd)
with config.logLock:
logging.info("command_all_exhibit_components: %s", cmd)
for component in config.componentList:
component.queue_command(cmd)
for projector in config.projectorList:
projector.queue_command(cmd)
for device in config.wakeOnLANList:
device.queue_command(cmd)
def create_new_exhibit(name, clone):
"""Create a new exhibit file
Set clone=None to create a new file, or set it equal to the name of an
existing exhibit to clone that exhibit."""
# Make sure we have the proper extension
if not name.lower().endswith(".exhibit"):
name += ".exhibit"
new_file = os.path.join(config.APP_PATH, "exhibits", name)
if clone is not None:
# Copy an existing file
# Make sure we have the proper extension on the file we're copying from
if not clone.lower().endswith(".exhibit"):
clone += ".exhibit"
existing_file = os.path.join(config.APP_PATH, "exhibits", clone)
shutil.copyfile(existing_file, new_file)
else:
# Make a new file
with config.exhibitsLock:
if not os.path.isfile(new_file):
# If this file does not exist, touch it so that it does.
with open(new_file, "w", encoding='UTF-8'):
pass
check_available_exhibits()
def delete_exhibit(name):
"""Delete the specified exhibit file"""
# Make sure we have the proper extension
if not name.lower().endswith(".exhibit"):
name += ".exhibit"
file_to_delete = os.path.join(config.APP_PATH, "exhibits", name)
with config.exhibitsLock:
try:
os.remove(file_to_delete)
except FileNotFoundError:
print(f"Error: Unable to delete exhibit {file_to_delete}. File not found!")
check_available_exhibits()
def get_exhibit_component(this_id) -> ExhibitComponent:
"""Return a component with the given id, or None if no such component exists"""
return next((x for x in config.componentList if x.id == this_id), None)
def get_wake_on_LAN_component(this_id) -> WakeOnLANDevice:
"""Return a WakeOnLan device with the given id, or None if no such component exists"""
return next((x for x in config.wakeOnLANList if x.id == this_id), None)
def poll_wake_on_LAN_devices():
"""Ask every Wake on LAN device to report its status at an interval.
"""
for device in config.wakeOnLANList:
new_thread = threading.Thread(target=device.update)
new_thread.daemon = True # So it dies if we exit
new_thread.start()
config.polling_thread_dict["poll_wake_on_LAN_devices"] = threading.Timer(30, poll_wake_on_LAN_devices)
config.polling_thread_dict["poll_wake_on_LAN_devices"].start()
def read_exhibit_configuration(name, updateDefault=False):
# We want the format of name to be "XXXX.exhibit", but it might be
# "exhibits/XXXX.exhibit"
error = False
split_path = os.path.split(name)
if len(split_path) == 2:
if split_path[0] == "exhibits":
name = split_path[1]
elif split_path[0] == "":
pass
else:
error = True
else:
error = True
if error:
# Something bad has happened. Display an error and bail out
print(
f"Error: exhibit definition with name {name} does not appear to be properly formatted. This file should be located in the exhibits directory.")
with config.logLock:
logging.error('Bad exhibit definition filename: %s', name)
return
config.currentExhibit = name
config.currentExhibitConfiguration = configparser.ConfigParser()
exhibit_path = os.path.join(config.APP_PATH, "exhibits")
    config.currentExhibitConfiguration.read(os.path.join(exhibit_path, name))
if updateDefault:
configReader = configparser.ConfigParser(delimiters="=")
configReader.optionxform = str # Override default, which is case in-sensitive
cEC_path = os.path.join(config.APP_PATH,
'currentExhibitConfiguration.ini')
with config.galleryConfigurationLock:
configReader.read(cEC_path)
configReader.set("CURRENT", "current_exhibit", name)
with open(cEC_path, "w", encoding="UTF-8") as f:
configReader.write(f)
def set_component_content(id_, content_list):
"""Loop the content list and build a string to write to the config file"""
content = ", ".join(content_list)
with config.galleryConfigurationLock:
try:
config.currentExhibitConfiguration.set(id_, "content", content)
except configparser.NoSectionError: # This exhibit does not have content for this component
config.currentExhibitConfiguration.add_section(id_)
config.currentExhibitConfiguration.set(id_, "content", content)
# Update the component
get_exhibit_component(id_).update_configuration()
# Write new configuration to file
with config.galleryConfigurationLock:
with open(os.path.join(config.APP_PATH, "exhibits", config.currentExhibit),
'w', encoding="UTF-8") as f:
config.currentExhibitConfiguration.write(f)
def update_synchronization_list(this_id, other_ids):
"""Manage synchronization between components.
config.synchronizationList is a list of dictionaries, with one dictionary for every
set of synchronized components.
"""
print(f"Received sync request from {this_id} to sync with {other_ids}")
print(f"Current synchronizationList: {config.synchronizationList}")
id_known = False
index = 0
match_index = -1
for item in config.synchronizationList:
if this_id in item["ids"]:
id_known = True
match_index = index
index += 1
if id_known is False:
# Create a new dictionary
temp = {"ids": [this_id] + other_ids}
temp["checked_in"] = [False for _ in temp["ids"]]
(temp["checked_in"])[0] = True # Check in the current id
config.synchronizationList.append(temp)
else:
index = (config.synchronizationList[match_index])["ids"].index(this_id)
((config.synchronizationList[match_index])["checked_in"])[index] = True
if all((config.synchronizationList[match_index])["checked_in"]):
print("All components have checked in. Dispatching sync command")
time_to_start = str(round(time.time() * 1000) + 10000)
for item in (config.synchronizationList[match_index])["ids"]:
get_exhibit_component(item).queue_command(f"beginSynchronization_{time_to_start}")
# Remove this sync from the list in case it happens again later.
config.synchronizationList.pop(match_index)
def update_exhibit_component_status(data, ip):
"""Update an ExhibitComponent with the values in a dictionary."""
this_id = data["id"]
this_type = data["type"]
component = get_exhibit_component(this_id)
if component is None: # This is a new id, so make the component
component = add_exhibit_component(this_id, this_type)
component.ip = ip
if "helperPort" in data:
component.helperPort = data["helperPort"]
if "helperAddress" in data:
component.helperAddress = data["helperAddress"]
component.update_last_contact_datetime()
if "AnyDeskID" in data:
component.config["AnyDeskID"] = data["AnyDeskID"]
if "currentInteraction" in data:
if data["currentInteraction"].lower() == "true":
component.update_last_interaction_datetime()
if "allowed_actions" in data:
allowed_actions = data["allowed_actions"]
for key in allowed_actions:
if allowed_actions[key].lower() in ["true", "yes", "1"]:
if key not in component.config["allowed_actions"]:
component.config["allowed_actions"].append(key)
else:
component.config["allowed_actions"] = [x for x in component.config["allowed_actions"] if x != key]
if "error" in data:
component.config["error"] = data["error"]
else:
if "error" in component.config:
component.config.pop("error")
# Set up log file
log_path = os.path.join(config.APP_PATH, "control_server.log")
logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S',
filename=log_path,
format='%(levelname)s, %(asctime)s, %(message)s',
level=logging.DEBUG)
|
hippo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import pygame
import random
import cv2
import canvas
import numpy as np
import itertools
import time
import config
import play
import multiprocessing
import arduino_client
black_ = 0, 0, 0
# If no activity is detected for timeout_ second, starts random activity.
timeout_ = 10.0
timeWithoutActivity_ = 0
# OpenCV highgui
def on_mouse(event, x, y, flag, params ):
global timeWithoutActivity_
if event == 1:
timeWithoutActivity_ = 0
W = config.w_ / config.num_notes_
note = int(x / W) + 1
if y > 400:
canvas.inject_alphabet_ca3(note, do_play = True)
if note == 8:
canvas.resetAll()
cv2.setMouseCallback( canvas.winName_, on_mouse )
with open( './songs_format.txt', 'r' ) as f:
txt = f.read()
seqs_ = [ x for x in txt.split( '\n' ) if x.strip() ]
def handle_arduino_command(line, q):
    global seqs_, timeWithoutActivity_
cmd, arg = line[:2], line[2:]
if len(line) < 2:
return
print( cmd, arg )
if cmd == '#B':
canvas.inject_alphabet_ca3(1+int(arg))
timeWithoutActivity_ = 0
elif cmd == '#P':
canvas.progressFromArduino(arg)
elif cmd == '#R':
print( 'Arduino said reset everything.' )
play.play('a1')
canvas.resetAll()
while not q.empty():
q.get()
elif cmd == '#S':
play.play_seq( seqs_[int(arg)] )
elif cmd == '#T':
play.play( arg )
else:
        print( 'Unknown command: %s' % line )
def runApp(q):
global timeWithoutActivity_, timeout_
canvas.init()
t = 0
for i in itertools.count():
t0 = time.time()
canvas.update_graphs()
if i % 2 == 0:
canvas.plot_graphs()
if i % 20 == 0:
canvas.add_inset( )
k = 0.85
# img = k*config.canvas_ + (1-k)*config.backgroundImg_
img = config.canvas_
canvas.show_frame(np.uint8(img))
dt = time.time() - t0
t += dt
timeWithoutActivity_ += dt
# check for arduino input.
if not q.empty():
line = q.get()
            handle_arduino_command( line, q )
## if auto is enabled then inject random stimulus.
#if timeWithoutActivity_ > timeout_:
# canvas.inject_alphabet_ca3( random.choice(config.alphabets_))
def main( args ):
config.args_ = args
# Launch the arduino client in a separate process.
q = multiprocessing.Queue()
p = multiprocessing.Process( target=arduino_client.main, args=(q,))
p.start()
runApp(q)
p.join()
if __name__ == '__main__':
import argparse
# Argument parser.
description = '''Hippocampus.'''
parser = argparse.ArgumentParser(description=description)
class Args: pass
args = Args()
parser.parse_args(namespace=args)
try:
main( args )
except KeyboardInterrupt as e:
pass
|
import_financials.py
|
#!/usr/bin/env python
import argparse
import bs4
import datetime
import time
import logging
import re
import requests
from bs4 import BeautifulSoup
import threading
import common
import data
import Quandl
####
# Setup tor to use for all imports
import socks
import socket
import requests
#socks.setdefaultproxy(proxy_type=socks.PROXY_TYPE_SOCKS5, addr="127.0.0.1", port=9050)
#socket.socket = socks.socksocket
#####
QUAND_KEY = "1BCHxHp1ExoE4hXRmafE"
BATCH = 100
LOGGER = logging.getLogger('import_financial_data')
MONEY = { '': 10**3, 'M': 10**6, 'B': 10**9 }
MONEY_RE = re.compile(r'^\$?(\-?\d+\.?\d*)([MB])?$')
def get_time():
now = datetime.date.today()
return now
def chunks(items, n):
    for i in xrange(0, len(items), n):
        yield items[i:i+n]
def check_valid(value):
if value == 'N/A':
value = None
return value
if value is None:
value = None
return value
value = str(value)
value = value.replace(',', '')
return value
def decode_float(value):
if isinstance(value, float):
return value
value = check_valid(value)
if value is None:
return value
try:
value = float(value)
return value
except:
print "could not convert value %s" % value
return value
def decode_percent(value):
value = check_valid(value)
if value is None:
return value
percent = '%'
if value.endswith(percent):
value = value.strip(percent)
return float(value)
def decode_money(value):
value = check_valid(value)
if not value:
return None
results = MONEY_RE.search(value)
if not results:
raise TypeError('invalid money: %s' % value)
value = float(results.group(1))
abbr = results.group(2) or ''
return float(value * MONEY[abbr]) / 1000
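# Example of the scaling above (illustrative): decode_money('$1.2B') parses 1.2, multiplies by
# MONEY['B'] == 10**9, then divides by 1000, returning 1200000.0 (i.e. values are in thousands).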
def decode_quandl(string):
value_list = []
string = str(string)
value = re.search(r'\d{4}.*', string)
value = value.group()
value_list = value.split(' ')
value = (value_list[-1])
return value
def get_yahoo_roa(companies):
url = 'https://finance.yahoo.com/q/ks'
for company in companies:
LOGGER.info('Getting ks: %s' % company.symbol)
map_data = {
'Return on Assets (ttm):': {
'key': 'return_on_assets',
'decode': decode_percent,
},
'Return on Equity (ttm):': {
'key': 'return_on_equity',
'decode': decode_percent,
},
}
response = requests.get(url, params={'s': company.symbol})
soup = BeautifulSoup(response.text, 'html.parser')
for doc in soup.body.find_all('tr'):
try:
md = map_data[doc.td.text]
if doc.td.text in map_data:
md['value'] = doc.contents[1].text.strip()
except:
continue
extra = {}
for md in map_data.values():
if 'value' not in md:
continue
value = md['decode'](md['value'])
if value is not None:
extra[md['key']] = value
if extra:
timestamp = get_time()
LOGGER.info('Setting ks: %s: %s' % (company.symbol, extra))
data.set_financial_data(company=company, symbol=company.symbol, date=timestamp, **extra)
else:
LOGGER.info('Skipping ks: %s' % company.symbol)
def get_quandl(companies):
for i, company in enumerate(companies):
q_codes ={
"net_income" : "NET_INCOME_Q",
"total_assets" : "TOTAL_ASSETS_Q",
"shares_outstanding" : "TOTAL_COMMON_SHARES_OUTSTANDING_Q"
}
financials = {}
LOGGER.info('Getting quandl income & assets for: %s' % company.symbol)
for k, v in q_codes.iteritems():
code = "RAYMOND/" + company.symbol + "_" + v
try:
stat = Quandl.get(code, rows="1", authtoken=QUAND_KEY)
stat = decode_quandl(stat)
stat = decode_float(stat)
financials.update({k : stat})
except:
stat = "N/A"
stat = decode_float(stat)
financials.update({k : stat})
LOGGER.info('%s --- %s:' % (company.symbol, financials))
timestamp = get_time()
data.set_financial_data(
company=company,
symbol=company.symbol,
date=timestamp,
**financials
)
def yahoo_finance(sleep_time):
companies = list(data.get_companies())
companies = [companies[i:i+BATCH] for i in range(0, len(companies), BATCH)]
for i, batch in enumerate(companies):
if i > 0: time.sleep(sleep_time)
batch = dict([(c.symbol, c) for c in batch])
url = 'https://query.yahooapis.com/v1/public/yql'
params = {
'q': 'select * from yahoo.finance.quotes where symbol IN ("%s")' % '", "'.join(batch.keys()),
'format': 'json',
'env': 'http://datatables.org/alltables.env',
}
response = requests.get(url, params=params)
body = response.json()
LOGGER.info('Getting quotes: %s' % ', '.join(batch.keys()))
for item in body['query']['results']['quote']:
company = batch[item['symbol']]
timestamp = get_time()
data.set_financial_data(
company=company,
symbol=company.symbol,
date=timestamp,
ask=decode_money(item.get('Ask')),
market_cap=decode_money(item.get('MarketCapitalization')),
ebitda=decode_money(item.get('EBITDA')),
pe_ratio_ttm=decode_float(item.get('PERatio')),
peg_ratio=decode_float(item.get('PEGRatio')),
DividendYield = decode_float(item.get('DividendYield')),
OneyrTargetPrice = decode_float(item.get('OneyrTargetPrice')),
EPSEstimateCurrentYear = decode_float(item.get('EPSEstimateCurrentYear')),
EPSEstimateNextYear = decode_float(item.get('EPSEstimateNextYear')),
EPSEstimateNextQuarter = decode_float(item.get('EPSEstimateNextQuarter')),
)
def quandl(sleep_time):
companies = list(data.get_companies())
companies = chunks(companies, BATCH)
work = []
for c in companies:
        t = threading.Thread(target=get_quandl, args=(c,))
work.append(t)
t.start()
def yahoo_roa(sleep_time):
companies = list(data.get_companies())
companies = chunks(companies, BATCH)
work = []
for c in companies:
        t = threading.Thread(target=get_yahoo_roa, args=(c,))
work.append(t)
t.start()
def main():
common.setup_logging()
parser = argparse.ArgumentParser()
parser.add_argument('--sleep-time', dest='sleep_time', type=float, default=1)
subparsers = parser.add_subparsers()
parser_yahoo_finance = subparsers.add_parser('yahoo_finance')
parser_yahoo_finance.set_defaults(func=yahoo_finance)
parser_quandl = subparsers.add_parser('quandl')
parser_quandl.set_defaults(func=quandl)
parser_yahoo_roa = subparsers.add_parser('yahoo_roa')
parser_yahoo_roa.set_defaults(func=yahoo_roa)
args = parser.parse_args()
args.func(sleep_time=args.sleep_time)
if __name__ == '__main__':
main()
|
Assembler.py
|
try:
from source.Database_generator import *
from source.Taxonomy_SQLITE_Connector import Taxonomy_SQLITE_Connector
from source.Metadata_SQLITE_Connector import Metadata_SQLITE_Connector
except:
from Database_generator import *
from Taxonomy_SQLITE_Connector import Taxonomy_SQLITE_Connector
from Metadata_SQLITE_Connector import Metadata_SQLITE_Connector
def setup_databases(force_download=False, chunk_size=None,no_taxonomy=False,mantis_config=None,cores=None):
print_cyan('Setting up databases')
if force_download == 'None': force_download = None
if chunk_size: chunk_size = int(chunk_size)
if cores: cores=int(cores)
mantis = Assembler(hmm_chunk_size=chunk_size, mantis_config=mantis_config,user_cores=cores,no_taxonomy=no_taxonomy)
mantis.setup_databases(force_download)
def check_installation(mantis_config=None,no_taxonomy=False,check_sql=False):
yellow('Checking installation')
mantis = Assembler(mantis_config=mantis_config,no_taxonomy=no_taxonomy)
mantis.check_installation(check_sql=check_sql)
class Assembler(Database_generator,Taxonomy_SQLITE_Connector):
def __init__(self, verbose=True, redirect_verbose=None,no_taxonomy=False, mantis_config=None,
hmm_chunk_size=None,keep_files=False,user_cores=None):
self.redirect_verbose = redirect_verbose
self.keep_files = keep_files
self.verbose = verbose
if no_taxonomy: self.use_taxonomy = False
else: self.use_taxonomy=True
self.mantis_config = mantis_config
self.user_cores = user_cores
self.broken_merged_hmms = set()
self.clean_merged_hmms = set()
self.start_time = time()
# to speed up hmm search we split very big hmms into smaller chunks - better job distribution
if hmm_chunk_size == 0:
self.hmm_chunk_size = None
elif hmm_chunk_size is None:
self.hmm_chunk_size = 5000
else:
self.hmm_chunk_size = hmm_chunk_size
self.read_config_file()
Taxonomy_SQLITE_Connector.__init__(self,resources_folder=self.mantis_paths['resources'])
#self.requirements_met()
        # We use a Manager list instead of a Queue because we need to add records to both the end
        # and the start of the 'queue' (actually a list), which multiprocessing.Queue does not allow.
        # A manager.list is needed because a plain list cannot be shared between processes.
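        # In practice (as described in processes_handler below), new work can be inserted at the
        # front of this list while sentinel records are appended to the back.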
self.manager = Manager()
self.queue = self.manager.list()
def __str__(self):
custom_refs = self.get_custom_refs_paths(folder=True)
custom_refs_str = ''
custom_res=''
for cref in custom_refs:
custom_refs_str += cref + '\n'
if custom_refs_str:
custom_res = f'# Custom references:\n{custom_refs_str}'
res=[]
if hasattr(self, 'output_folder'):
            res.append(f'Output folder:\n{self.output_folder}')
res.append(f'Default references folder:\n{self.mantis_paths["default"]}')
res.append(f'Custom references folder:\n{self.mantis_paths["custom"]}')
if self.mantis_paths['NOG'][0:2] != 'NA':
res.append(f'TAX NOG references folder:\n{self.mantis_paths["NOG"]}')
if self.mantis_paths['NCBI'][0:2] != 'NA':
res.append(f'TAX NCBI references folder:\n{self.mantis_paths["NCBI"]}')
if self.mantis_paths['pfam'][0:2] != 'NA':
res.append(f'Pfam reference folder:\n{self.mantis_paths["pfam"]}')
if self.mantis_paths['kofam'][0:2] != 'NA':
res.append(f'KOfam reference folder:\n{self.mantis_paths["kofam"]}')
if self.mantis_paths['tcdb'][0:2] != 'NA':
res.append(f'TCDB reference folder:\n{self.mantis_paths["tcdb"]}')
res.append('------------------------------------------')
res='\n'.join(res)
if custom_res: res+='\n'+custom_res
ref_weights=', '.join([f'{i}:{self.mantis_ref_weights[i]}' for i in self.mantis_ref_weights if i!='else'])
if ref_weights:
res+= f'# Weights:\n{ref_weights}\n'
nog_tax=', '.join([i for i in self.mantis_nogt_tax])
if nog_tax:
res+= f'# NOG tax IDs:\n{nog_tax}\n'
return res
def requirements_met(self):
for f in [self.is_conda_available(), self.is_hmmer_available()]:
if not f:
kill_switch(RequirementsNotMet)
def is_conda_available(self):
process = run_command('conda -V', get_output=True)
check = re.search('conda', str(process.stdout))
if not check:
print_cyan('Conda dependency not met!')
print_cyan('Install Conda on your system by following the instructions at: https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html')
return check
def is_hmmer_available(self):
check_command = ' conda list hmmer '
process = run_command(check_command, get_output=True)
check = re.search('hmmer', str(process.stdout))
if not check:
print_cyan('HMMER dependency not met!')
print_cyan('Install HMMER on your conda environment by doing the following:')
print_cyan('conda activate <conda_environment>')
print_cyan('conda install -c bioconda hmmer')
return check
def check_internet_connection(self):
try:
requests.get("http://www.google.com")
return True
except requests.ConnectionError:
print("Could not connect to internet!\nIf you would like to run offline make sure you introduce organism NCBI IDs instead of synonyms!")
return False
def get_default_ref_path(self):
file = open(self.config_file, 'r')
line = file.readline()
while line:
line = line.strip('\n')
if '#' not in line:
# data sources configuration
if 'default_ref_folder=' in line:
line_path = add_slash(line.replace('default_ref_folder=', ''))
if line_path:
default_ref_path = line_path
return default_ref_path
line = file.readline()
file.close()
def set_nogt_line(self, line_path):
if line_path:
res = set()
tax_ids = [i for i in line_path.split(',')]
if tax_ids:
for t_id in tax_ids:
try:
ncbi_taxon_id = int(t_id)
organism_lineage = self.fetch_ncbi_lineage(ncbi_taxon_id)
res.update(organism_lineage)
except:
ncbi_taxon_id = self.get_taxa_ncbi(t_id)
if ncbi_taxon_id:
organism_lineage = self.fetch_ncbi_lineage(ncbi_taxon_id)
res.update(organism_lineage)
for i in res:
self.mantis_nogt_tax.add(str(i))
def setup_paths_config_file(self):
self.nog_db = 'dmnd'
file = open(self.config_file, 'r')
line = file.readline()
nogt_line=None
while line:
line = line.strip('\n')
if not line.startswith('#') and line:
# data sources configuration
if line.startswith('custom_ref_folder='):
line_path = add_slash(line.replace('custom_ref_folder=', ''))
if line_path: self.mantis_paths['custom'] = line_path
elif line.startswith('resources_folder='):
line_path = add_slash(line.replace('resources_folder=', ''))
if line_path:
self.mantis_paths['resources'] = line_path
elif line.startswith('nog_ref_folder='):
line_path = add_slash(line.replace('nog_ref_folder=', ''))
if line_path: self.mantis_paths['NOG'] = line_path
# taxa ids list for only downloading nogt specific to lineage
elif line.startswith('nog_tax='):
nogt_line = line.replace('nog_tax=', '')
elif line.startswith('pfam_ref_folder='):
line_path = add_slash(line.replace('pfam_ref_folder=', ''))
if line_path: self.mantis_paths['pfam'] = line_path
elif line.startswith('kofam_ref_folder='):
line_path = add_slash(line.replace('kofam_ref_folder=', ''))
if line_path: self.mantis_paths['kofam'] = line_path
elif line.startswith('ncbi_ref_folder='):
line_path = add_slash(line.replace('ncbi_ref_folder=', ''))
if line_path: self.mantis_paths['NCBI'] = line_path
elif line.startswith('tcdb_ref_folder='):
line_path = add_slash(line.replace('tcdb_ref_folder=', ''))
if line_path: self.mantis_paths['tcdb'] = line_path
elif line.startswith('_weight='):
ref_source, weight = line.split('_weight=')
self.mantis_ref_weights[ref_source] = float(weight)
elif line.startswith('nog_ref='):
nog_db = line.replace('nog_ref=', '').split()[0]
if nog_db.lower() not in ['dmnd','hmm']:
kill_switch(InvalidNOGType)
else:
self.nog_db=nog_db
line = file.readline()
file.close()
if self.use_taxonomy:
if nogt_line:
if self.launch_taxonomy_connector():
self.set_nogt_line(nogt_line)
def read_config_file(self):
self.mantis_ref_weights = {'else': 0.7}
self.mantis_nogt_tax = set()
if self.mantis_config:
print(f'Using custom MANTIS.config: {self.mantis_config}', flush=True, file=self.redirect_verbose)
self.config_file = self.mantis_config
else:
if not os.path.isdir(MANTIS_FOLDER):
                print('Make sure you run this package by calling its folder, like so:\n python mantis/ <command>\n ',
                      flush=True, file=self.redirect_verbose)
raise FileNotFoundError
self.config_file = MANTIS_FOLDER + 'MANTIS.config'
try:
open(self.config_file, 'r')
except:
print('MANTIS.config file has been deleted or moved, make sure you keep it in the root of the project!',
flush=True, file=self.redirect_verbose)
raise FileNotFoundError
default_ref_path = self.get_default_ref_path()
# if there's no path, we just assume its in the default folder
if not default_ref_path: default_ref_path = add_slash(MANTIS_FOLDER + 'References')
resources_path = add_slash(MANTIS_FOLDER + 'Resources')
self.mantis_paths = {'default': default_ref_path,
'resources': resources_path,
'custom': add_slash(default_ref_path + 'Custom_references'),
'NOG': add_slash(default_ref_path + 'NOG'),
'pfam': add_slash(default_ref_path + 'pfam'),
'kofam': add_slash(default_ref_path + 'kofam'),
'NCBI': add_slash(default_ref_path + 'NCBI'),
'tcdb': add_slash(default_ref_path + 'tcdb'),
}
self.setup_paths_config_file()
if not self.use_taxonomy:
self.mantis_paths['NOG']=f'NA{SPLITTER}'
self.mantis_paths['NCBI']=f'NA{SPLITTER}'
if not os.path.isdir(self.mantis_paths['custom']):
Path(self.mantis_paths['custom']).mkdir(parents=True, exist_ok=True)
if self.verbose: print(self, flush=True, file=self.redirect_verbose)
def order_by_size_descending(self, refs_list):
res = {}
for ref in refs_list:
res[ref] = os.stat(ref).st_size
# mixing big and low size HMMs so that we try not to run out of memory, might lead to more idle time.
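        # Illustrative example: sizes [9, 7, 5, 3, 1] are reordered to [9, 1, 7, 3, 5],
        # alternating the largest and smallest remaining references.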
sorted_res = sorted(res, key=res.get, reverse=True)
resorted_res = []
c = 1
while sorted_res:
if c == 1:
resorted_res.append(sorted_res.pop(0))
c = -1
elif c == -1:
resorted_res.append(sorted_res.pop(-1))
c = 1
return resorted_res
def compile_refs_list(self, folder=False):
        # doesn't include NOG or NCBI
refs_list = []
default_list = [
get_ref_in_folder(self.mantis_paths['pfam']) if not folder else self.mantis_paths['pfam'],
get_ref_in_folder(self.mantis_paths['kofam']) if not folder else self.mantis_paths['kofam'],
get_ref_in_folder(self.mantis_paths['tcdb']) if not folder else self.mantis_paths['tcdb'],
]
for ref_path in self.get_custom_refs_paths(folder):
if ref_path[0:2] != 'NA':
refs_list.append(ref_path)
for ref_path in default_list:
if ref_path and ref_path[0:2] != 'NA':
refs_list.append(ref_path)
return refs_list
#####SETTING UP DATABASE#####
def get_path_default_ref(self, database, taxon_id=None):
target_file = None
if 'kofam' in database.lower():
target_file = get_ref_in_folder(self.mantis_paths['kofam'])
elif 'pfam' in database.lower():
target_file = get_ref_in_folder(self.mantis_paths['pfam'])
elif 'tcdb' in database.lower():
target_file = get_ref_in_folder(self.mantis_paths['tcdb'])
elif 'NOG'.lower() in database.lower():
if not taxon_id: taxon_id = 'NOGG'
target_file = get_ref_in_folder(self.mantis_paths['NOG'] + taxon_id)
elif 'NCBI'.lower() in database.lower():
if not taxon_id: taxon_id = 'NCBIG'
target_file = get_ref_in_folder(self.mantis_paths['NCBI'] + taxon_id)
return target_file
def check_reference_exists(self, database, taxon_id=None, force_download=False):
ncbi_resources=add_slash(self.mantis_paths['resources']+'NCBI')
if database == 'ncbi_res':
            if file_exists(ncbi_resources + 'gc.prt', force_download):
return True
elif database == 'taxonomy':
taxonomy_db=self.mantis_paths['resources'] + 'Taxonomy.db'
gtdb_resources = add_slash(self.mantis_paths['resources'] + 'GTDB')
if file_exists(taxonomy_db, force_download):
return True
elif database == 'NOGSQL':
if file_exists(self.mantis_paths['NOG'] + 'eggnog.db', force_download):
return True
elif database == 'tcdb':
if file_exists(self.mantis_paths['tcdb'] + 'tcdb.dmnd', force_download):
return True
elif database == 'NOG_DMND':
if file_exists(self.mantis_paths['NOG'] + 'eggnog_proteins.dmnd', force_download):
return True
target_file = self.get_path_default_ref(database, taxon_id)
if target_file:
if target_file.endswith('.dmnd'):
if not file_exists(target_file, force_download=force_download):
return False
else:
for extension in ['', '.h3f', '.h3i', '.h3m', '.h3p']:
if not file_exists(target_file + extension, force_download=force_download):
return False
else:
return False
return True
#####LISTING HMMS DATABASE#####
def check_installation_extras(self, res, verbose=True):
ncbi_resources=add_slash(self.mantis_paths['resources']+'NCBI')
essential_genes = f'{MANTIS_FOLDER}Resources{SPLITTER}essential_genes/essential_genes.txt'
taxonomy_db=self.mantis_paths['resources']+'Taxonomy.db'
if verbose: yellow('Checking extra files', flush=True, file=self.redirect_verbose)
if not file_exists(essential_genes):
red('Essential genes list is missing, it should be in the github repo!')
if verbose: red('Failed installation check on [files missing]: ' + essential_genes, flush=True, file=self.redirect_verbose)
res.append(self.mantis_paths['resources'] + 'essential_genes/')
else:
if verbose: green('Passed installation check on: ' + self.mantis_paths['resources'] + 'essential_genes',flush=True, file=self.redirect_verbose)
        if not file_exists(ncbi_resources + 'gc.prt'):
            if verbose: red('Failed installation check on [files missing]: ' + ncbi_resources + 'gc.prt', flush=True, file=self.redirect_verbose)
            res.append(ncbi_resources)
        else:
            if verbose: green('Passed installation check on: ' + ncbi_resources + 'gc.prt', flush=True, file=self.redirect_verbose)
if self.use_taxonomy:
if not file_exists(taxonomy_db):
if verbose: red(f'Failed installation check on [files missing]: {taxonomy_db}',flush=True, file=self.redirect_verbose)
res.append(taxonomy_db)
else:
if verbose: green(f'Passed installation check on: {taxonomy_db}', flush=True,file=self.redirect_verbose)
return res
def check_chunks_dir(self,chunks_dir):
all_chunks=[]
for hmm in os.listdir(chunks_dir):
if hmm.endswith('.hmm'):
all_chunks.append(hmm)
for hmm in all_chunks:
if not self.check_missing_chunk_files(hmm,chunks_dir):
return False
return True
def check_missing_chunk_files(self,hmm,chunks_dir):
missing_files=['.h3f', '.h3i', '.h3m', '.h3p']
res=0
for inner_file in os.listdir(chunks_dir):
for mf in missing_files:
if inner_file==f'{hmm}{mf}':
res+=1
if res==len(missing_files): return True
red(f'Failed installation check on [files missing]: {hmm} in chunks folder: {chunks_dir}',
flush=True, file=self.redirect_verbose)
return False
def check_installation_folder(self, ref_folder_path, res, verbose=True, extra_requirements=[]):
missing_files = set(extra_requirements)
try:
files_dir = os.listdir(ref_folder_path)
except:
if verbose: red(f'Failed installation check on [path unavailable]: {ref_folder_path}', flush=True,file=self.redirect_verbose)
res.append(ref_folder_path)
self.passed_check = False
return
ref_type=None
for file in files_dir:
if file.endswith('.dmnd'):
ref_type='dmnd'
missing_files.update(['.dmnd'])
elif file.endswith('.hmm'):
ref_type='hmm'
missing_files.update(['.hmm', '.h3f', '.h3i', '.h3m', '.h3p'])
if not ref_type:
            if verbose: red(f'Failed installation check on [invalid reference type]: {ref_folder_path}', flush=True, file=self.redirect_verbose)
res.append(ref_folder_path)
self.passed_check = False
return
check = len(missing_files)
if 'chunks' in files_dir:
if not self.check_chunks_dir(f'{ref_folder_path}chunks'):
self.passed_check = False
return
else:
missing_files = set(extra_requirements)
check = len(missing_files)
for file in files_dir:
if ref_type=='hmm':
if file.endswith('.hmm'):
if '.hmm' in missing_files:
check -= 1
missing_files.remove('.hmm')
elif file.endswith('.h3f'):
if '.h3f' in missing_files:
check -= 1
missing_files.remove('.h3f')
elif file.endswith('.h3i'):
if '.h3i' in missing_files:
check -= 1
missing_files.remove('.h3i')
elif file.endswith('.h3m'):
if '.h3m' in missing_files:
check -= 1
missing_files.remove('.h3m')
elif file.endswith('.h3p'):
if '.h3p' in missing_files:
check -= 1
missing_files.remove('.h3p')
elif ref_type=='dmnd':
if file.endswith('.dmnd'):
check -= 1
missing_files.remove('.dmnd')
if file in extra_requirements:
check -= 1
missing_files.remove(file)
if check != 0:
missing_files_str = '; '.join(missing_files)
red(f'Failed installation check on [files missing]: {ref_folder_path}\n{missing_files_str}',
flush=True, file=self.redirect_verbose)
res.append(ref_folder_path)
else:
if verbose: green(f'Passed installation check on: {ref_folder_path}', flush=True,
file=self.redirect_verbose)
def compile_sql_metadata(self):
all_files=set()
for ref in self.compile_refs_list(folder=True):
metadata_file=f'{ref}metadata.tsv'
all_files.add(metadata_file)
if self.mantis_paths['NCBI'][0:2] != 'NA':
ncbi_tax=self.get_taxon_refs('NCBI',folder=True)
for ref in ncbi_tax:
metadata_file = f'{ref}metadata.tsv'
all_files.add(metadata_file)
if self.mantis_paths['NOG'][0:2] != 'NA':
nog_tax=self.get_taxon_refs('NOG',folder=True)
for ref in nog_tax:
metadata_file = f'{ref}metadata.tsv'
all_files.add(metadata_file)
for metadata_file in all_files:
if not file_exists(metadata_file.replace('.tsv','.db')):
cursor = Metadata_SQLITE_Connector(metadata_file)
cursor.close_sql_connection()
def check_sql_databases(self,ref_dbs):
broken_refs=set()
broken_ids={}
for db in ref_dbs:
yellow(f'Checking {db}metadata.db', flush=True,file=self.redirect_verbose)
cursor = Metadata_SQLITE_Connector(f'{db}metadata.tsv')
db_res=cursor.test_database()
if db_res: broken_refs.add(db)
if db_res: broken_ids[db]=db_res
cursor.close_sql_connection()
for db in broken_ids:
red(f'Failed SQL check in {db} for the following IDs:\n{broken_ids[db]}', flush=True,file=self.redirect_verbose)
if not broken_refs:
green('------------------------------------------', flush=True, file=self.redirect_verbose)
green('-------------SQL CHECK PASSED-------------', flush=True, file=self.redirect_verbose)
green('------------------------------------------', flush=True, file=self.redirect_verbose)
else:
red('------------------------------------------', flush=True, file=self.redirect_verbose)
red('-------------SQL CHECK FAILED-------------', flush=True, file=self.redirect_verbose)
red('------------------------------------------', flush=True, file=self.redirect_verbose)
def check_installation(self, verbose=True,check_sql=False):
# we use the verbose mode when running the check_installation directly
self.compile_sql_metadata()
self.passed_check = True
ref_dbs=set()
if not cython_compiled():
self.passed_check = False
if verbose: red('Cython needs to be compiled!', flush=True,
file=self.redirect_verbose)
else:
if verbose: green('Cython correctly compiled!', flush=True, file=self.redirect_verbose)
res = []
res = self.check_installation_extras(res, verbose)
if verbose: yellow('Checking references installation', flush=True, file=self.redirect_verbose)
requirements = {
self.mantis_paths['pfam']: ['metadata.tsv'],
self.mantis_paths['tcdb']: ['metadata.tsv'],
self.mantis_paths['kofam']: ['metadata.tsv'],
}
# per tax level FOR EGGNOG
if self.mantis_paths['NOG'][0:2] != 'NA':
tax_refs = self.get_taxon_refs(db='NOG', folder=True)
if not tax_refs:
if verbose: red('Failed installation check on [path unavailable]: ' + self.mantis_paths['NOG'],flush=True, file=self.redirect_verbose)
res.append(self.mantis_paths['NOG'])
for tax_ref_folder in tax_refs:
self.check_installation_folder(tax_ref_folder, res, verbose=False,extra_requirements=['metadata.tsv'])
ref_dbs.add(tax_ref_folder)
nogt_check = [i for i in res if self.mantis_paths['NOG'] in i]
if not nogt_check:
if verbose: green('Passed installation check on: ' + self.mantis_paths['NOG'], flush=True,file=self.redirect_verbose)
# per tax level FOR NCBI
if self.mantis_paths['NCBI'][0:2] != 'NA':
#checking those already present
tax_refs = self.get_taxon_refs(db='NCBI', folder=True)
if not tax_refs:
if verbose: red('Failed installation check on [path unavailable]: ' + self.mantis_paths['NCBI'],
flush=True, file=self.redirect_verbose)
res.append(self.mantis_paths['NCBI'])
for tax_ref_folder in tax_refs:
# we skip the taxon 1 since it has no hmms
self.check_installation_folder(tax_ref_folder, res, verbose=False, extra_requirements=['metadata.tsv'])
ref_dbs.add(tax_ref_folder)
ncbi_check = [i for i in res if self.mantis_paths['NCBI'] in i]
if not ncbi_check:
if verbose: green('Passed installation check on: ' + self.mantis_paths['NCBI'], flush=True,
file=self.redirect_verbose)
for ref_folder in self.compile_refs_list(folder=True):
if ref_folder in requirements:
self.check_installation_folder(ref_folder, res, verbose, extra_requirements=requirements[ref_folder])
else:
self.check_installation_folder(ref_folder, res, verbose)
ref_dbs.add(ref_folder)
if res:
self.passed_check = False
fail_res = ''
for i in res: fail_res += f'{i}\n'
if verbose: red(f'Installation check failed on:\n{fail_res}', flush=True, file=self.redirect_verbose)
if self.passed_check:
if verbose:
green('------------------------------------------', flush=True, file=self.redirect_verbose)
green('--------INSTALLATION CHECK PASSED!--------', flush=True, file=self.redirect_verbose)
green('------------------------------------------', flush=True, file=self.redirect_verbose)
else:
print_cyan('Installation check passed', flush=True, file=self.redirect_verbose)
else:
if verbose:
yellow('------------------------------------------', flush=True, file=self.redirect_verbose)
red('--------INSTALLATION CHECK FAILED!--------', flush=True, file=self.redirect_verbose)
yellow('------------------------------------------', flush=True, file=self.redirect_verbose)
else:
print_cyan('Installation check failed', flush=True, file=self.redirect_verbose)
if check_sql: self.check_sql_databases(ref_dbs)
def get_custom_refs_paths(self, folder=False):
try:
custom_refs_folders = os.listdir(self.mantis_paths['custom'])
for potential_ref_folder in custom_refs_folders:
try:
files = os.listdir(self.mantis_paths['custom'] + potential_ref_folder)
for potential_file in files:
if potential_file.endswith('.hmm') or potential_file.endswith('.dmnd'):
if folder:
try:
yield add_slash(self.mantis_paths['custom'] + potential_ref_folder)
except GeneratorExit:
return ''
else:
try:
yield add_slash(self.mantis_paths['custom'] + potential_ref_folder) + potential_file
except GeneratorExit:
return ''
except:
pass
except:
print('Custom references folder is missing, did you correctly set the path? If path is not set make sure you didn\'t delete the custom_ref folder!',
flush=True, file=self.redirect_verbose)
self.passed_check = False
return
with open(self.config_file, 'r') as file:
line = file.readline()
while line:
if line[0] != '#':
if 'custom_ref=' in line:
line = line.strip('\n')
ref_path=line.replace('custom_ref=', '')
if not (ref_path.endswith('.hmm') or ref_path.endswith('.dmnd')):
if os.path.isdir(ref_path):
for inner_file in os.listdir(ref_path):
if inner_file.endswith('.hmm') or inner_file.endswith('.dmnd'):
ref_path=add_slash(ref_path)+inner_file
if folder:
try:
yield add_slash(SPLITTER.join(ref_path.split(SPLITTER)[:-1]))
except GeneratorExit:
return ''
else:
try:
yield ref_path
except GeneratorExit:
return ''
line = file.readline()
def get_taxon_ref_path(self, taxon_id, db):
tax_refs = self.get_local_ref_taxon_ids(db=db)
if taxon_id in tax_refs:
if db=='NOG' and self.nog_db == 'dmnd':
return add_slash(self.mantis_paths[db] + taxon_id) + f'{taxon_id}.dmnd'
else:
return add_slash(self.mantis_paths[db] + taxon_id) + f'{taxon_id}_merged.hmm'
else:
return None
def get_ref_taxon_ids(self, db):
res = set()
if not file_exists(self.mantis_paths[db]): return res
if db=='NOG':
available_taxon_ids = self.get_taxon_ids_eggNOG()
if self.mantis_nogt_tax:
for tax_id in self.mantis_nogt_tax:
if tax_id in available_taxon_ids:
res.add(tax_id)
return res
else:
return set(available_taxon_ids)
else:
for i in os.listdir(self.mantis_paths[db]):
                if re.search(r'\d+', i): res.add(i)
return res
def get_local_ref_taxon_ids(self,db):
res = set()
if file_exists(self.mantis_paths[db]):
if db=='NOG':
if self.mantis_nogt_tax:
for i in self.mantis_nogt_tax:
res.add(i)
for i in os.listdir(self.mantis_paths[db]):
                if re.search(r'\d+', i): res.add(i)
return res
def get_taxon_refs(self, db, folder=False):
#locally available taxon ids
local_taxon_ids = self.get_local_ref_taxon_ids(db)
#all taxon ids
taxon_ids=self.get_ref_taxon_ids(db)
res = []
for t in taxon_ids:
if t in local_taxon_ids:
if folder:
res.append(add_slash(self.mantis_paths[db] + t))
else:
if self.nog_db=='hmm':
res.append(add_slash(self.mantis_paths[db] + t) + f'{t}_merged.hmm')
else:
res.append(add_slash(self.mantis_paths[db] + t) + f'{t}_merged.dmnd')
global_folder = add_slash(self.mantis_paths[db] + db + 'G')
if folder:
if file_exists(global_folder):
res.append(global_folder)
else:
if self.nog_db == 'hmm':
if file_exists(f'{global_folder}{db}G_merged.hmm'):
res.append( f'{global_folder}{db}G_merged.hmm')
else:
if file_exists(f'{global_folder}{db}G_merged.dmnd'):
res.append( f'{global_folder}{db}G_merged.dmnd')
return res
def processes_handler(self, target_worker_function, worker_count, add_sentinels=True):
'''
this will first generate one process per worker, then we add sentinels to the end of the list which will basically tell us when the queue is empty
if we need to add new work (e.g. when doing taxa annotation) we just add the new work to the start of the list
'''
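        # Descriptive note: the master is expected to seed self.queue with work before calling
        # this method; `worker_count` None sentinels are appended here, and each worker exits
        # once it pops a sentinel, so records inserted at index 0 are processed before them.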
# os.getpid to add the master_pid
processes = [Process(target=target_worker_function, args=(self.queue, os.getpid(),)) for _ in
range(worker_count)]
# adding sentinel record since queue can be signaled as empty when its really not
if add_sentinels:
for _ in range(worker_count): self.queue.append(None)
for process in processes:
process.start()
# we could manage the processes memory here with a while cycle
for process in processes:
process.join()
            # exitcode 0 for successful exits
if process.exitcode != 0:
sleep(5)
                print('Ran into an issue, check the log for details. Exiting!')
os._exit(1)
if __name__ == '__main__':
p = Assembler(mantis_config='/media/HDD/data/mantis_references/MANTIS.config')
|
test_utils.py
|
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import queue
from unittest.mock import patch, call
import pytest
@pytest.fixture
def mock_queue(monkeypatch):
class MockQueue:
items = []
def get(self, timeout=None):
try:
return self.items.pop()
except IndexError:
if timeout:
raise queue.Empty()
raise
def put(self, item):
self.items.append(item)
mockqueue = MockQueue()
monkeypatch.setattr('queue.Queue', lambda: mockqueue)
return mockqueue
def test_empty_pool_is_populated_with_instances(mock_queue):
from bigchaindb import utils
pool = utils.pool(lambda: 'hello', 4)
assert len(mock_queue.items) == 0
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 1
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 2
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 3
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 4
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 4
def test_pool_blocks_if_no_instances_available(mock_queue):
from bigchaindb import utils
pool = utils.pool(lambda: 'hello', 4)
assert len(mock_queue.items) == 0
# We need to manually trigger the `__enter__` method so the context
# manager will "hang" and not return the resource to the pool
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
# We need to keep a reference of the last context manager so we can
# manually release the resource
last = pool()
assert last.__enter__() == 'hello'
assert len(mock_queue.items) == 0
    # This would block using `queue.Queue` but since we mocked it, it will
    # just raise an IndexError because it's trying to pop from an empty list.
with pytest.raises(IndexError):
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
# Release the last resource
last.__exit__(None, None, None)
assert len(mock_queue.items) == 1
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
def test_pool_raises_empty_exception_when_timeout(mock_queue):
from bigchaindb import utils
pool = utils.pool(lambda: 'hello', 1, timeout=1)
assert len(mock_queue.items) == 0
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 1
# take the only resource available
assert pool().__enter__() == 'hello'
with pytest.raises(queue.Empty):
with pool() as instance:
assert instance == 'hello'
@patch('multiprocessing.Process')
def test_process_group_instantiates_and_start_processes(mock_process):
from bigchaindb.utils import ProcessGroup
def noop():
pass
concurrency = 10
pg = ProcessGroup(concurrency=concurrency, group='test_group', target=noop)
pg.start()
mock_process.assert_has_calls([call(group='test_group', target=noop,
name=None, args=(), kwargs={},
daemon=None)
for i in range(concurrency)], any_order=True)
for process in pg.processes:
process.start.assert_called_with()
def test_lazy_execution():
from bigchaindb.utils import Lazy
lz = Lazy()
lz.split(',')[1].split(' ').pop(1).strip()
result = lz.run('Like humans, cats tend to favor one paw over another')
assert result == 'cats'
class Cat:
def __init__(self, name):
self.name = name
cat = Cat('Shmui')
lz = Lazy()
lz.name.upper()
result = lz.run(cat)
assert result == 'SHMUI'
def test_process_set_title():
from uuid import uuid4
from multiprocessing import Queue
from setproctitle import getproctitle
from bigchaindb.utils import Process
queue = Queue()
uuid = str(uuid4())
process = Process(target=lambda: queue.put(getproctitle()),
name=uuid)
process.start()
assert queue.get() == uuid
|
test_autograd.py
|
# Owner(s): ["module: autograd"]
import contextlib
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
import operator
import subprocess
from copy import deepcopy
from collections import OrderedDict
from itertools import product
from operator import mul
from functools import reduce
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoLapack, slowTest, IS_WINDOWS, IS_MACOS,
disable_gc, gradcheck, gradgradcheck, parametrize, instantiate_parametrized_tests)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import mask_not_all_zeros
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipMeta)
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.testing._internal.logging_tensor import no_dispatch
import pickle
def graph_desc(fn):
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
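# graph_desc renders the backward graph as nested constructor-style strings; the tests below
# compare against forms like 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))'.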
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from custom backward function
# are propagated as undefined and not tensor full of zeroes
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
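# The two tests above differ only in ctx.set_materialize_grads: by default,
# undefined output gradients are materialized as zero tensors before being
# passed to backward; with set_materialize_grads(False) they arrive as None
# and backward must handle that itself. A minimal sketch (MyScale is an
# illustrative name, not part of this suite):
#   class MyScale(Function):
#       @staticmethod
#       def forward(ctx, x):
#           ctx.set_materialize_grads(False)
#           return x * 2
#       @staticmethod
#       def backward(ctx, grad):
#           return None if grad is None else grad * 2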
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
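# Takeaways from the test above: backward receives one grad slot per forward
# output (None for the non-tensor and non-differentiable outputs) and must
# return one entry per forward input, using None for non-tensor inputs such
# as `scale`; the gradient returned for t3 is simply discarded because t3
# does not require grad.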
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
# test to ensure grad(grad)check runs successfully even if there is an
# unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.atan2, choose a different op
torch.atan2(dual_x, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is True
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulating a dense gradient into a sparse gradient changes the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulating a dense gradient into a dense gradient preserves the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulating a sparse gradient into a sparse gradient preserves the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
def test_accumulate_grad_with_zero_numel_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
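# c has zero elements, so backward produces all-zero gradients: a.grad keeps
# the empty (4, 0) shape, and b's broadcast dimension reduces back to (4, 1).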
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
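# grad_sum = 2 * (2x + y) + (x + 2y) = 5x + 4y, so backpropagating ones
# through it adds a Hessian-vector contribution of 5 to x.grad and 4 to
# y.grad on top of the first-order gradients checked above.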
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
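# As in test_hessian_vector: grad_sum = 5x + 4y elementwise, hence the
# expected Hessian-vector products of 5 and 4 below.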
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.fail("expected a RuntimeError about mismatched shapes")
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
# Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_grad_batched_grad(self):
x = torch.randn(2, 2, requires_grad=True)
out = x.clone() # Size([2, 2])
batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2) # Size([3, 2, 2])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# Detect shape mismatch
grad_out = torch.ones(2, 2)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
# Scalar outputs
out = x.sum() # Size([])
batched_grad = torch.arange(3) # Size([3])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# We consider a scalar and a size-1 tensor to be a mismatch. This is consistent with current non-batched behavior.
grad_out = torch.ones(2).unsqueeze(1)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
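# With is_grads_batched=True the leading dimension of each grad_output is,
# in effect, vmapped over: a batch of N cotangents yields gradients with an
# extra leading dimension of size N, as if grad() had been called once per
# slice and the results stacked. The remaining dimensions must still match
# the output exactly, which is what the two error cases above exercise.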
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backward()
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
# backward() doesn't have an allow_unused flag, so when a variable is not
# part of the graph it behaves as if allow_unused were True:
# z.grad will simply be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
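# y = x ** 2 and z = y ** 3 = x ** 6, so the accumulated gradient is
# 2 * x * go_y + 6 * x ** 5 * go_z, which is what the assertion below checks.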
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
# Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
# w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
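# With x = 1 and y = 4, z = x / 2 + y / 8 = 1 elementwise, so w = exp(z) = e
# and dw/dx = exp(z) / 2 = e / 2; accumulated onto the 1 / 2 from r.backward()
# this gives the (1 + e) / 2 expected below.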
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should still be usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++-implemented
# functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x.clone()[0]))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
# case when x is broadcast to match y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
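# The backward of .diagonal() scatters the cotangent back onto the diagonal,
# so an expanded scalar v yields value * torch.eye(10), as asserted below.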
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
# After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
# anything especially interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# Prior to #22983, this test failed, complaining that buffers had already
# been freed. Also a pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
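# detach() returns a tensor that shares storage with the original but is cut out of the autograd graph.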
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def test_detach_then_inplace_raises_in_autograd(self):
x = torch.randn([], requires_grad=True)
orig_x = x.detach().clone()
y = x ** 2 # saves x
z = x.detach()
z.zero_()
with self.assertRaisesRegex(RuntimeError, "has been modified by an inplace"):
y.backward()
def test_detach_disallows_metadata_change(self):
x = torch.randn([], requires_grad=True)
detached = x.detach()
with self.assertRaisesRegex(
RuntimeError, "not allowed on a Tensor created from .data or .detach()"):
detached.resize_(3, 3)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
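# The argmax indices returned by torch.max do not require grad, so b is an isolated node;
# backward through o should still succeed.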
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
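# Assigning tensors that require grad into slices of a plain tensor makes x a non-leaf that
# requires grad, with gradients flowing back to y and z.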
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
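# Even under no_grad, an in-place op bumps the version counter, so the x saved for backward
# is detected as modified.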
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from need_copy set.
# 2. When accumulating in second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in 3rd place)
# 3. When accumulating in the third place, it wasn't in the need_copy set
# as well, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions, except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
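# set_grad_enabled can be used both as a context manager and as a plain function call;
# a call made inside the context takes effect immediately.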
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
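# Reenter.backward() calls backward() again from inside the autograd engine (reentrant backward).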
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on the CPU thread, finishes on the GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
def test_var_mean_differentiable(self):
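# var_mean should match separate var()/mean() calls in both values and gradients.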
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertEqual(r1, r2, rtol=0.01, atol=0.0)
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X, largest=largest)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (defaulting to 1e-06)
# to compute the numerical gradient which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = (A @ A.mT) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in lobpcg's forward.
# Note this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.mT)
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
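# The gradient of pow at an all-zeros input should be exactly zero (no nan/inf leaking into input.grad).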
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_profiler(self):
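# A simple mul + add run under the profiler should record both aten::mul and aten::add events.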
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
# Ignore record_function user scope.
if "autograd::engine::evaluate_function" in e.name:
continue
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able to
# propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_record_function_new_signatures(self):
# Test that the new _record_function ops work
# Note: Remove once record_function uses these directly
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
record = torch.ops.profiler._record_function_enter_new("bar", None)
try:
y = x * 2 + 4
finally:
torch.ops.profiler._record_function_exit(record)
function_events = p.function_events
foo_event = [event for event in function_events if "bar" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
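# Two events (5us and 10us) are added, then the average is added to itself, doubling the
# totals: count 4, cpu_time_total 30us, average 7.5us.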
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default
print(
"Total time based on python measurements: ",
_format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate an arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
imag_key = 'imag'
self.assertRaises(RuntimeError, lambda: hasattr(x, imag_key))
self.assertTrue(hasattr(y, imag_key))
keys.remove(imag_key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
def test_out_variant_raises_when_inputs_require_grad(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses inplace
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw warnings if there is an error
# but we'd want to at least see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that, when grad goes out of scope at the end of this function, the PyObject is destroyed.
# We can test this by checking that Foo is not kept alive once t is destroyed.
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that as well?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# With non-contiguous indices and values, we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
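# gradcheck compares the analytical Jacobian against a numerically estimated one;
# fast_mode checks random projections of the Jacobian rather than every entry.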
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.expectedFailure
def test_gradcheck_sparse_csr_input(self):
def check(fast_mode):
def fn(sparse_csr):
return torch.clone(sparse_csr).to_dense()
# Fails because gradcheck can't work with sparse csr inputs yet
gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
# check(fast_mode=True) # Segmentation fault
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
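# With nonzero jitter the backward is not reentrant (two runs give different grads),
# so gradcheck should fail unless nondet_tol absorbs the difference.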
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when layout is not mkldnn (i.e. the tensor has strides) and the input has a dimension with stride 0 (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# when mkldnn inputs, forward mode testing is not allowed
# Update tolerances below to make sure the gradients match even in single precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when sparse outputs (always raise even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when mkldnn outputs (always raise even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when a runtime error is encountered while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
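        # NonDetFunc injects randomness into its backward, so two backward runs give
        # different Jacobians and get_analytical_jacobian should report reentrant=False.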
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_gradcheck_forward_ad_runs_with_no_requires_grad(self):
        # Currently requires_grad is used as an easy way for gradcheck to know
        # which inputs of the function are meant to be differentiable.
        # This test checks that the inputs passed to the function do not have
        # requires_grad=True, even though they may have requires_grad=True when
        # passed to gradcheck.
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
if fwAD._current_level >= 0:
self.assertFalse(x.requires_grad)
self.assertFalse(y.requires_grad)
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
def test_gradcheck_forward_ad_respects_requires_grad(self):
        # Currently requires_grad is used as an easy way for gradcheck to know
        # which inputs of the function are meant to be differentiable.
jvp_count = [0]
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
jvp_count[0] += 1
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 2) # (2) once per input
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 6) # (+4): (once with normal ZT (+1), once with efficient ZT (+1)) for each input (x2)
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 12) # (+6): (compute batch of 2 with vmap (+1), with a loop (+2)) for each input (x2)
jvp_count = [0]
# Repeat the previous test except we mark one input with requires_grad=False
# NB: _test_undefined_forward_mode is only (+1), when function has single differentiable input, not (+2)!
# Otherwise, other counts are halved.
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 5) # 1 + 1 + 3
def test_gradcheck_check_forward_or_backward_only(self):
"""Depending on settings for check_forward_ad and check_backward_ad, the
correct codepaths should be reached (or not reached)
"""
fwd_fail_err_msg = "FAIL FWD"
bwd_fail_err_msg = "FAIL BWD"
class UserFn(Function):
@staticmethod
def forward(ctx, foo, fwd_bad, bwd_bad):
ctx.fwd_bad = fwd_bad
ctx.bwd_bad = bwd_bad
return foo * 2
@staticmethod
def vjp(ctx, gO):
if ctx.bwd_bad:
raise RuntimeError(bwd_fail_err_msg)
else:
return 2 * gO, None, None
@staticmethod
def jvp(ctx, gI, _1, _2):
if ctx.fwd_bad:
raise RuntimeError(fwd_fail_err_msg)
else:
return 2 * gI
for fast_mode in (True, False):
for check_forward_ad in (True, False):
for check_backward_ad in (True, False):
for fwd_bad in (True, False):
for bwd_bad in (True, False):
fwd_should_fail = fwd_bad and check_forward_ad
bwd_should_fail = bwd_bad and check_backward_ad
def run():
gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad), check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad, check_undefined_grad=check_backward_ad,
check_batched_grad=check_backward_ad, fast_mode=fast_mode)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
if not check_forward_ad and not check_backward_ad:
with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
run()
continue
if not fwd_should_fail and not bwd_should_fail:
run()
else:
# If both fail, backward AD failure "hides" forward AD failure
if fwd_should_fail:
fail_msg = fwd_fail_err_msg
if bwd_should_fail:
fail_msg = bwd_fail_err_msg
with self.assertRaisesRegex(RuntimeError, fail_msg):
run()
def test_gradcheck_forward_ad_batched_grad(self):
x = torch.rand(2, dtype=torch.double, requires_grad=True)
        # multiple inputs and outputs with non-tensor inputs
def fn1(a: torch.Tensor, b: int):
return a.clone(), a + 1
gradcheck(fn1, (x, 1), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
# unrelated inputs: tangent for c is None
def fn2(a: torch.Tensor, c: torch.Tensor):
return a.clone()
gradcheck(fn2, (x, x.clone()), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
class Fn(Function):
@staticmethod
def forward(ctx, foo):
return foo * 2
@staticmethod
def vjp(ctx, gO):
return gO * 2
@staticmethod
def jvp(ctx, gI):
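                # Calling a random op here is intentional: it should trip vmap's
                # randomness check when the batched forward-grad check runs below.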
torch.randn_like(gI)
return gI * 2
msg = "vmap: We do not yet support calling random operations inside of vmap"
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(Fn.apply, (x,), check_forward_ad=True, check_batched_forward_grad=True)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
# The autograd engine creates worker threads only when GPU devices are present.
        # So make sure that we do shut down threads when we're testing CUDA and make sure
        # that there is no thread to shut down when we're not using CUDA.
if TEST_CUDA:
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
else:
self.assertNotRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
"""
Basic test for checkpoint without reentrant autograd.
"""
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
# Run model with and without checkpointing and verify gradients are
# equivalent, regardless of if inputs require grads or not.
module_copy = deepcopy(module)
feat_combined = []
feat_combined_no_checkpoint = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = input_requires_grad
data_r_copy = data_r.clone()
feat_r = checkpoint(module, data_r, use_reentrant=False)
feat_combined.append(feat_r)
feat_r_no_checkpoint = module_copy(data_r)
feat_combined_no_checkpoint.append(feat_r_no_checkpoint)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
mean_combined_no_checkpoint.backward()
for checkpoint_param, param in zip(module.parameters(), module_copy.parameters()):
self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
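        # Calling autograd.grad() on a checkpointed graph should fail, but the failure
        # must reset checkpointing state so that a later backward() still works.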
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
class NoGradModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.lin2 = nn.Linear(2, 2, bias=False)
def forward(self, x):
with torch.no_grad():
return self.lin2(self.linear(x))
module = NoGradModule()
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"none of output has requires_grad=True"
)
if use_reentrant
else contextlib.suppress()
)
a = torch.randn(2, 2, requires_grad=True)
for _ in range(3):
with err_ctx:
# out does not require grad
out = checkpoint(module, a, use_reentrant=use_reentrant)
# Make loss require grad, otherwise we would run into
# "element 0 of tensors does not require grad and does not have a grad_fn"
out += a
out.sum().backward()
def test_checkpointing_without_reentrant_correct_grad(self):
"""
Verifies that correct gradients are calculated for checkpoint
without reentrant autograd, for both backward() and autograd.grad().
"""
a = torch.randn(2, 2, requires_grad=True)
b = torch.exp(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
c = checkpoint(torch.exp, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
a.grad = None
d = checkpoint(torch.exp, a, use_reentrant=False).sum()
d_grad, = torch.autograd.grad(d, (a,))
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_checkpointing_without_reentrant_dataparallel(self):
"""
Verifies gradient correctness when checkpoint without reentrant autograd
is used in conjunction with DataParallel.
"""
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
def forward(self, inp):
return self.linear(inp)
a = torch.randn(2, 2, requires_grad=True)
if torch.cuda.is_available():
a = a.cuda()
model = LinearModule()
if torch.cuda.is_available():
model = model.cuda()
b = deepcopy(model)(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
module = torch.nn.DataParallel(deepcopy(model))
c = checkpoint(module, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
"""
Ensures that gradient hooks are only called once per tensor.
"""
w = torch.randn(10, 10, requires_grad=True)
count = 0
def hook(grad):
nonlocal count
count += 1
w.register_hook(hook)
x = torch.rand(10, 10, requires_grad=True)
h = w * x # Using w outside the checkpoint
out = checkpoint(lambda x: w * x, h, use_reentrant=False) # Using w inside the checkpoint
out.sum().backward()
# should only call hook once
self.assertEqual(count, 1)
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
"""
Ensures checkpointing without reentrant autograd works with functions
with arbitrary input/output structures.
"""
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(5, 5, bias=False)
def forward(self, dict_input):
tensor = dict_input["tensor"]
return {
"result": self.layer(tensor)
}
model_no_checkpoint = MyModel()
model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)
inp = {
"tensor": torch.randn(5, 5)
}
out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()
out_checkpoint = checkpoint(
model_checkpoint_without_reentrant,
inp,
use_reentrant=False
)["result"].sum()
self.assertEqual(out_checkpoint, out_no_checkpoint)
out_no_checkpoint.backward()
out_checkpoint.backward()
for param, checkpoint_param in zip(model_no_checkpoint.parameters(), model_checkpoint_without_reentrant.parameters()):
self.assertEqual(param.grad, checkpoint_param.grad)
def test_callback_adds_callback(self):
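        # A callback queued from inside another execution-engine callback should still
        # run during the same backward pass, so the counter ends up at 2.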
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
        # Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
        self.assertEqual(out.grad_fn._saved_eps, 1.) # c10::optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
        self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10::optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
        self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10::optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
                        # never modify a, b inplace for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertEqual(non_inplace_grad, inplace_grad)
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
        # This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.
        # This test checks custom autograd.Functions that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
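        # Grad mode toggled inside a reentrant backward should be restored to whatever
        # it was when that backward started.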
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
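        # With a zero base, the gradient wrt the exponent should be -inf for a negative
        # exponent and 0 otherwise, for both tensor and plain int bases.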
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
class BadBw2(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
@staticmethod
def backward(ctx, foo):
return foo
@staticmethod
def vjp(ctx, foo):
return foo
class BadJvp(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
BadBw.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
BadBw2.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
with fwAD.dual_level():
d = fwAD.make_dual(inp, torch.rand_like(inp))
res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
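        # Each non-"ok" flag makes jvp return something that is not a proper view of
        # the input's tangent, and gradcheck should raise the matching error.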
flag_to_error = {
"ok": None,
"not_a_view": "jvp is not returning a view",
"not_a_view_of_inp": "jvp is not returning a view of the given",
"not_a_view_of_inp_base": "jvp is not returning a view of the same base",
}
class ViewFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.flag = flag
ctx.size = foo.size()
return foo.narrow(0, 0, 2)
@staticmethod
def vjp(ctx, gO):
gI = gO.new_zeros(ctx.size)
gI.narrow(0, 0, 2).copy_(gO)
return gI, None
@staticmethod
def jvp(ctx, gI, _):
res = gI.narrow(0, 0, 2)
if ctx.flag != "ok":
# Break the view in the gradients!
res = res.clone()
if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
# Result should be a view, just of the wrong thing
res = res.view_as(res)
return res
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
for flag, msg in flag_to_error.items():
def test_fn(inp):
if flag == "not_a_view_of_inp_base":
inp = inp.view_as(inp)
return ViewFn.apply(inp, flag)
if msg is None:
gradcheck(test_fn, inp, check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.mark_dirty(foo)
ctx.flag = flag
foo.mul_(2)
return foo
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.flag:
# Don't do the change inplace
return 2 * gI
else:
gI.mul_(2)
return gI
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
def test_fn(inp, flag):
inp = inp.clone()
return InplaceFn.apply(inp, flag)
gradcheck(test_fn, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
class UserFn(Function):
@staticmethod
def forward(ctx, foo, should_fail):
ctx.should_fail = should_fail
return foo * 2
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.should_fail:
# Wrong gradient formula
return 3 * gI
else:
return 2 * gI
inp = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_non_tensor_before_tensor_args(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, nt, x, nt2, y):
return x * 2 + y * 3
@staticmethod
def jvp(ctx, nt, x_t, nt2, y_t):
self.assertIsNone(nt)
self.assertIsNone(nt2)
return x_t * 2 + y_t * 3
x = torch.tensor(1., dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
y = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, t)
MyFn.apply(1, dual_x, 1, y)
gradcheck(MyFn.apply, (1, x.requires_grad_(True), 1, y.requires_grad_(True)), check_forward_ad=True,
check_backward_ad=False, check_batched_grad=False)
def test_custom_function_forward_mode_forward_is_no_op(self):
error_regex = "A custom Function's forward is returning a view \\(or an input as-is\\)"
return_lambdas = {
# If we return an input as-is in forward, that is treated
# as if self.view_as(self) is performed. If jvp returns x.view_as(x),
# this is OK.
"view_as": lambda x: x.view_as(x),
# Expect this to raise an error
"self": lambda x: x,
# Expect this to raise the same error
"mul_by_2": lambda x: x * 2,
}
for k, fn in return_lambdas.items():
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return x + y, x
@staticmethod
def vjp(ctx, gO1, gO2):
return gO1 + gO2, gO1
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t + y_t, fn(x_t)
a = torch.tensor(1., dtype=torch.double, requires_grad=True)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(1., dtype=torch.double, requires_grad=True)
c = torch.tensor(1., dtype=torch.double)
t2 = torch.tensor(1., dtype=torch.double)
d = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
c_dual = fwAD.make_dual(c, t2)
if k == "view_as":
_, out2 = MyFn.apply(a_dual, b)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t)
_, out2 = MyFn.apply(c_dual, d)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t2)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(a_dual, b)
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(c_dual, d)
if k == "view_as":
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
def test_custom_function_save_for_forward(self):
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
ctx.save_for_backward(x, y)
ctx.save_for_forward(x, y)
ctx.z = z
ctx.prod = x * y
return z * ctx.prod
@staticmethod
def jvp(ctx, x_t, y_t, _):
x_p, y_p = ctx.saved_tensors
z = ctx.z
return z * (y_p * x_t + x_p * y_t)
@staticmethod
def vjp(ctx, grad_out):
x, y = ctx.saved_tensors
z = ctx.z
return z * grad_out * y, z * grad_out * x, None
a = torch.tensor(1., requires_grad=True, dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(2., requires_grad=True, dtype=torch.double)
c = 4
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual, b, c)
out.backward()
gradcheck(Func.apply, (a, b, c), check_forward_ad=True)
# When saved for backward, but not saved for forward
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
ctx.save_for_backward(x)
return x.clone()
@staticmethod
def jvp(ctx, x_t):
self.assertEqual(len(ctx.saved_tensors), 0)
return x_t
@staticmethod
def vjp(ctx, grad_out):
x, = ctx.saved_tensors
self.assertEqual(len(ctx.saved_tensors), 1)
return grad_out
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual)
out.backward()
gradcheck(Func.apply, (a,), check_forward_ad=True)
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
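        # The in-place add happens on a view created locally inside forward, so the
        # returned tensor's grad_fn should be MyFnBackward in both cases.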
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
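        # Integer-valued outputs (argmax, argsort, searchsorted, ...) are
        # non-differentiable and should never require grad.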
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
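# Check that grad_fn saved tensors alias the original Python tensor when the
# saved tensor is a leaf or a non-output (no unnecessary re-wrapping), while a
# saved output is equal to, but not the same object as, the original.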
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
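# The tensor unpacked from a SavedVariable should share its version counter
# with the original, so in-place updates keep both versions in sync.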
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
# Tests that default hooks are properly registered, used and reset
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
# See also:
# - test_saved_variable_packing_unpacking_saved_original_with_hooks
def pack(x):
warnings.warn("pack")
return x
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
a = torch.ones(5, requires_grad=True)
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as w:
y = a * a
# should raise two warnings from a being saved twice
self.assertEqual(len(w), 2)
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(2 * a, y.grad_fn._saved_self)
self.assertEqual(2 * a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(4 * a, a.grad)
# Exiting the contexts above properly resets the default hooks
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
# See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
self.assertEqual(y, y.grad_fn._saved_result)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_not_fail(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_setting_default_saved_variable_hooks_twice_should_use_inner(self):
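# The hooks in effect at the time a tensor is saved are the ones used: a * a
# runs under the inner context (factor 5 pack/unpack), while b * b runs after
# the inner context exits and therefore uses the outer hooks (factor 3).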
with torch.autograd.graph.saved_tensors_hooks(lambda x: 3 * x, lambda x: 3 * x):
b = torch.randn(5, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 5 * x, lambda x: 5 * x):
a = torch.randn(5, requires_grad=True)
y = a * a
z = b * b
y.sum().backward()
z.sum().backward()
self.assertEqual(2 * 5 * 5 * a, a.grad)
self.assertEqual(2 * 3 * 3 * b, b.grad)
def test_save_on_cpu_and_checkpoint(self):
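# save_on_cpu and non-reentrant checkpoint should compose in either nesting
# order and still produce the same gradient as the plain computation.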
a = torch.randn(2, 2, requires_grad=True)
b = a.pow(2).pow(2).pow(2).pow(2)
b.sum().backward()
b_grad = a.grad.clone()
a.grad.zero_()
with torch.autograd.graph.save_on_cpu():
h = a.pow(2)
h = checkpoint(lambda x: x.pow(2).pow(2), h, use_reentrant=False)
c = h.pow(2)
c.sum().backward()
c_grad = a.grad.clone()
a.grad.zero_()
def f(a):
h = a.pow(2)
with torch.autograd.graph.save_on_cpu():
h = h.pow(2).pow(2)
return h.pow(2)
d = checkpoint(f, a, use_reentrant=False)
d.sum().backward()
d_grad = a.grad.clone()
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_pack_hook_with_inplace_modification_should_fail(self):
a = torch.randn(5, requires_grad=True)
def inc(x):
x += 1
return x
with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y = torch.exp(a)
y = torch.exp(a)
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_default_saved_variable_hooks_double_backward(self):
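# With default saved tensor hooks, double backward should account for how many
# times a tensor is saved while the hooks are active (see the expected factors
# in the comments below).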
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
self.assertEqual(6 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 2 because only a is saved once
self.assertEqual(6 * 2 * a, a.grad)
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
# so grad is saved and self (i.e. a) is saved
self.assertEqual(6 * 4 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# combining the two above blocks: 2 * 4 = 8
# note that in that sense, a is saved twice
self.assertEqual(6 * 8 * a, a.grad)
def test_graph_save_on_cpu(self):
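# Tensors packed to CPU by save_on_cpu should come back with the same values,
# dtype and layout when unpacked for backward, and gradients should match the
# usual 2 * a result for y = a * a.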
def test(get_input, cuda, pin_memory):
with torch.autograd.graph.save_on_cpu(pin_memory):
a = get_input()
if cuda:
a.cuda()
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
if y.is_sparse:
y = y.to_dense()
y.sum().backward()
actual = 2 * a
expected = a.grad
if a.is_sparse:
actual = actual.coalesce()
expected = expected.coalesce()
self.assertEqual(actual, expected)
for cuda in [False] + ([True] if torch.cuda.is_available() else []):
for pin_memory in [True, False]:
# FloatTensor
test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
# DoubleTensor
test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
# Sparse tensor
x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
def f(x):
a = x + 1
return a * a
# with grad
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_grad = torch.cuda.memory_allocated()
del a
del y
# without grad
a = torch.ones(1, requires_grad=True, device="cuda")
with torch.no_grad():
y = f(a)
memory_without_grad = torch.cuda.memory_allocated()
self.assertGreater(memory_with_grad, memory_without_grad)
del a
del y
# with hooks
with torch.autograd.graph.save_on_cpu():
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_hooks = torch.cuda.memory_allocated()
self.assertEqual(memory_with_hooks, memory_without_grad)
def test_pynode_destruction_deadlock(self):
script = """
import torch
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gO):
return gO.clone()
def get_out():
inp = torch.rand(2, requires_grad=True)
# The python function is first so that it runs
# last in the backward pass
right = Foo.apply(inp)
# An op that creates new memory
left1 = inp.clone()
# An op that saves its input
left2 = left1 ** 2
# Inplace modify so that the backward for
# left2 always raises an error
left1 += 1
# An op that takes both side as input.
# After running, both side's last op will be in
# the ready queue
# And the op for left will run first as it was
# executed last during the forward
out = left2 + right
return out
# Nothing should be stored in global variables here because, from what
# I can see, python leaks all the global objects
get_out().sum().backward()
# This used to deadlock when the PyNode is being destroyed after
# the error is raised.
"""
try:
subprocess.check_output(
[sys.executable, '-c', script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),
# It is ok to have an extra long timeout here as a timeout means the test failed
timeout=20)
except subprocess.TimeoutExpired as e:
self.fail(msg="Example code timed out! See the code sample in the test for details.")
except subprocess.CalledProcessError as e:
err_msg = "RuntimeError: one of the variables needed for gradient computation"
self.assertTrue(err_msg in e.output.decode("utf-8"))
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
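# Tests that combine forward-mode AD (dual tensors) with batched gradients
# computed via the internal vmap machinery.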
class TestAutogradForwardModeBatchedGrad(TestCase):
def test_out_of_place_basic(self):
a = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
b = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
self.assertTrue(gradcheck(torch.sin, a, check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
self.assertTrue(gradcheck(torch.add, (a, b), check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
def test_out_of_place_not_same_layout(self):
input = torch.zeros([2, 2]).transpose(0, 1)
tangent = torch.zeros([2, 2, 2])
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
return fwAD.unpack_dual(x)[1]
x_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIsNot(x_tangent, tangent)
def test_inplace_on_view_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
base = torch.zeros([2, 2])
view = base.view_as(base)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertFalse(view_tangent._is_view()) # Optimization to share the same tensor!
self.assertIs(view_tangent, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIs(view_tangent, tangent)
def test_inplace_on_view_not_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
view = torch.zeros([2, 2]).transpose(0, 1)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIs(view_tangent._base, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIsNot(view_tangent, tangent)
def test_metadata_check_for_storage_numel_skipped(self):
# See: test_metadata_check_checks_storage_numel for the reverse of this test
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(10, 4)
def jvp(tangent):
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# No copy is made
self.assertIs(tangent, unpacked_tangent)
# as_strided raises
with self.assertRaisesRegex(RuntimeError, "can access memory outside of `tensor`"):
dual.as_strided((5,), (1,), 0)
return unpacked_tangent
torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
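# Tests for the forward-mode AD python API (fwAD): dual level management,
# packing/unpacking, views and in-place updates.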
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
def test_metadata_check_checks_storage_numel(self):
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(4)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# Verify that mutating unpacked tangent does not affect the original tangent
tangent_clone = tangent.clone()
unpacked_tangent *= 2
self.assertTrue(torch.allclose(tangent_clone, tangent))
# as_strided runs without error
dual.as_strided((5,), (1,), 0)
def test_metadata_check_when_primal_has_conj_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# the conj bit does not materialize. If it materializes it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj()
b = torch.rand_like(a)
self.assertTrue(torch.is_conj(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_when_primal_has_neg_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# conj bit does not materialize. If it materializes it would
# cause the layout check to fail for views that do not index the
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj().imag
b = torch.randn(2, 2, dtype=torch.cdouble).imag
self.assertTrue(torch.is_neg(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
# The following test functions are meant to ensure all of the following behaviors:
# - Ensure that the default level system in the python binding works
# - Ensure that only level 0 exists and nesting is properly disabled
# - Ensure that printing works fine
# - Ensure that basic packing/unpacking works
# - Ensure that advanced packing/unpacking works
# - For memory / version counter sharing
# - For backward AD (regular ops)
# - Ensure that view + inplace for both modes work fine
# - Ensure we do proper cleanup on exit of a level
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object; feel free to relax this in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_set_fw_grad_having_own_fw_grad_at_same_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
with self.assertRaisesRegex(RuntimeError, "has a forward gradient at the same level"):
fwAD.make_dual(baz, dual)
def test_make_dual_inference_tensor_in_inference_mode(self):
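# For inference tensors created under inference_mode, make_dual does not return
# a view, and in-place ops on the dual update the original tensor directly.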
with torch.inference_mode():
foo = torch.rand(2)
bar = torch.rand(2)
foo_copy = foo.clone()
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertFalse(dual._is_view())
dual += 1
self.assertFalse(torch.allclose(foo, foo_copy))
def test_make_dual_torch_dispatch(self):
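# make_dual goes through an aten.alias call that tensor subclasses can
# intercept via __torch_dispatch__, whether the subclass is the primal or
# the tangent.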
counter = [0]
class MySubclass(torch.Tensor):
def __new__(cls, data=None):
return torch.Tensor._make_subclass(cls, data)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if func.overloadpacket == torch.ops.aten.alias:
counter[0] += 1
with no_dispatch():
return MySubclass(torch.ops.aten.alias(*args))
with no_dispatch():
return func(*args, **kwargs)
a = torch.tensor(1.)
s = MySubclass(a)
with fwAD.dual_level():
fwAD.make_dual(s, torch.rand_like(s))
self.assertEqual(counter[0], 1)
fwAD.make_dual(torch.rand_like(s), s)
self.assertEqual(counter[0], 2)
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check unpacked dual is returned as a named tuple
# NB: Every invocation of unpack_dual returns a new tensor view
self.assertIsNot(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertEqual(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertIs(baz_tangent, fwAD.unpack_dual(baz).tangent)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
# Also, all mentions of "non differentiable view" here mean a non forward differentiable view
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable view but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates through a view should be tracked as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_view_inplace_always_creates_a_view(self):
# See https://github.com/pytorch/pytorch/issues/67800
# The codepath may depend on the op. At the time of writing, when self is not a dual tensor
# the resulting forward grad for self for...
# - add_ has the same layout as self
# - mul_ has the same layout as other
# This is kind of fragile because the above depends on how the forward grad expression
# is written. For add and mul at least, the output inherits the layout of LHS.
# We want to handle at least these two cases.
inplace_binary_ops = ( # Add more to this list?
lambda x, y: x.add_(y),
lambda x, y: x.mul_(y),
lambda x, y: x.copy_(y),
)
for inplace_binary_op in inplace_binary_ops:
base = torch.randn(2, 2)
view = base.transpose(0, 1)
primal = torch.randn(2, 2)
tangent = torch.randn(2, 2)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
inplace_binary_op(view, dual)
# Verify that a view relationship is created for both the primal and tangent
p, t = fwAD.unpack_dual(base)
p_clone = p.clone()
t_clone = t.clone()
view *= 2
p, t = fwAD.unpack_dual(base)
self.assertTrue(torch.allclose(p_clone * 2, p))
self.assertTrue(torch.allclose(t_clone * 2, t))
def test_grad_cleanup(self):
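# Tangents must not leak across dual levels: once a level exits, previously
# packed tensors should unpack with a None tangent, and a new level starts
# from a clean state.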
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
def test_non_differentiable(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
# No differentiable outputs, shouldn't error
eq = foo == bar
# Inplace
foo.eq_(bar)
def test_create_new_zeros_with_same_meta(self):
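# _new_zeros_with_same_feature_meta should create a zero tensor whose trailing
# (feature) dims match the target's sizes/strides, with the leading batch dims
# taken from self, and with matching dtype and storage offset.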
new_zeroes_fn = torch.ops.aten._new_zeros_with_same_feature_meta
def check(a, b):
def assert_same_meta(t, target):
for num_bdim in range(t.dim()):
result = new_zeroes_fn(t, target, self_num_batch_dims=num_bdim)
self.assertEqual(result.dim(), target.dim() + num_bdim)
# Check size/strides match for feature dims only
for i in range(num_bdim, result.dim()):
self.assertEqual(result.size()[i], target.size()[i - num_bdim])
self.assertEqual(result.stride()[i], target.stride()[i - num_bdim])
# Check that we generate strides reasonably
if target.is_contiguous():
self.assertTrue(result.is_contiguous())
self.assertEqual(result.storage_offset(), target.storage_offset())
prod_of_t_bdims = reduce(operator.mul, t.size()[:num_bdim], 1)
self.assertEqual(len(result.storage()), len(target.storage()) * prod_of_t_bdims)
# TensorOptions are the same
self.assertEqual(result.dtype, target.dtype)
assert_same_meta(a, b)
assert_same_meta(b, a)
a = torch.randn(5, dtype=torch.float)
b = torch.randn(2, 3, 4, dtype=torch.double)
check(a, b)
# non-contiguous case
a = torch.randn(2, 3, 4).transpose(0, 1).contiguous().transpose(0, 1)
b = torch.randn(2, 3, 4)
check(a, b)
a = torch.randn(5).narrow(0, 1, 2)
b = torch.randn(2)
check(a, b)
# tensor is not a view, but still does not index the entirety of its storage
a = torch.randn(5).resize_(4)
b = torch.randn(4)
check(a, b)
# Zero-numel tensors
a = torch.randn(1, 0, 2)
b = torch.randn(1, 2)
check(a, b)
# Scalar tensor
a = torch.tensor(1.)
b = torch.randn(1, 2)
check(a, b)
def test_backward_graph_destruction(self):
def fn():
a = torch.rand(10, requires_grad=True)
da = fwAD.make_dual(torch.rand_like(a), a)
# Create an object with a c++ cycle as:
# db -> AutogradMeta -> ForwardGrad -> db's grad
# db's grad -> AutogradMeta -> MulBackward
# MulBackward -> SavedVariable -> db
db = da.exp()
with fwAD.dual_level():
fn()
# This test makes sure that we don't deadlock on exit of this
# context manager. If we do, there is most likely something wrong
# with the locking of the forward ad level
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@skipMeta
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
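# requires_grad can only be turned on for floating point dtypes; for integral
# dtypes it should raise, while turning it off is always allowed.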
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent graph is a cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
def _get_cuda_memory_usage():
# we don't need CUDA synchronize because the statistics are not tracked at
# actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
before = _get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = _get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = _get_cuda_memory_usage()
self.assertEqual(before, after)
# TODO: see if these tests can be ported to OpInfos or moved to torch.where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither of
# (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require gradients for the
# weight parameters while still requiring them for the inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong type raises
with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
x.grad = 0
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml
# and there was a bug where bfloat16 was not recognized as floating point.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
def test_copy_forward_ad_broadcasting(self, device):
# copy_ allows the src to have a different shape from self as long as src is
# broadcastable to self. Make sure forward AD handles this case.
primal = torch.rand(3, 3, device=device)
tangent = torch.rand(3, 3, device=device)
non_dual = torch.rand(1, 3, 3, device=device)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
non_dual.copy_(dual)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
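# Reentrant backward calls made inside a backward node should work no matter
# whether the node was queued on the CPU or the GPU autograd thread.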
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
# Test that warnings raised during backward are always propagated as Python warnings (gh-50209)
# NOTE: For device=cuda, warning gets propagated from a worker thread
a = torch.zeros((), device=device, requires_grad=True)
b = torch._C._nn._test_warn_in_autograd(a)
with self.assertWarnsRegex(UserWarning, "Warn from backward"):
b.backward()
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
for mode in (True, False):
@torch.inference_mode(mode)
def func(x):
self.assertEqual(torch.is_inference_mode_enabled(), mode)
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(not mode or torch.is_inference(d))
self.assertEqual(d.requires_grad, requires_grad and not mode)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing an inplace operation, the tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument, so there are no mixed inference and normal
# tensor inputs for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since it's equivalent to s.view(c.sizes()), which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
class PropagatingThread(threading.Thread):
'''Helper class to propagate exception from child
thread to main thread on join.
Reference: https://stackoverflow.com/a/31614591/5602957
'''
def run(self):
self.exception = None
try:
self.ret = super(PropagatingThread, self).run()
except Exception as e:
self.exception = e
def join(self, timeout=None):
super(PropagatingThread, self).join(timeout)
if self.exception:
raise self.exception from self.exception
return self.ret
threads = []
for _ in range(num_threads):
p = PropagatingThread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_multithreaded_exception_propagation(self):
# Test whether exceptions in child threads
# are propagated to the main thread.
def fn():
self.assertTrue(False)
with self.assertRaises(AssertionError):
self._run_py_multithread_fn(fn)
def test_simple_backward(self):
# simple multithreaded backward that creates threads at the beginning of training
# and keeps everything else separate per thread, i.e. inputs, operations, etc.
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (i.e. This is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use the functional grad() api, gradients will not
# be accumulated into the same place and should all be the same
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
def registers_hooks_for_each_thread():
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
x = torch.ones(5, 5, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
y = x * x
# should raise two warnings from x being saved twice
self.assertEqual(len(w), 2)
y.sum().backward()
def test_dataparallel_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
_self = self
class Model(torch.nn.Module):
def forward(self, x):
with warnings.catch_warnings(record=True) as w:
y = x * x
if torch.cuda.device_count() >= 2:
# DataParallel is calling the forward in different threads
# without propagating TLS, so hooks should not be called here
_self.assertEqual(len(w), 0)
else:
# DataParallel only uses one thread
# so hooks should be called here
_self.assertGreater(len(w), 0)
x = torch.ones(5, 5, requires_grad=True)
model = torch.nn.DataParallel(Model())
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
model(x)
with warnings.catch_warnings(record=True) as w:
y = x * x
# hooks should be called here
_self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
# different threads and we need to ensure the user specifies retain_graph=True,
# otherwise we error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will succeed in this case; all other threads should raise
# an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# result should equal num_threads * gradient
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Import test cases from the autograd/ directory below. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex # noqa: F401
from autograd.test_functional import TestAutogradFunctional # noqa: F401
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
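# Illustrative note (not from the original source): instantiate_device_type_tests
# generates one concrete TestCase per available device type from the generic
# TestAutogradDeviceType template above, and each generated test name gains a
# device suffix (e.g. test_mv_grad_stride_0_cpu on the CPU variant), which is how
# the per-device classes named in the comment above come into existence.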
instantiate_parametrized_tests(TestAutograd)
if __name__ == '__main__':
run_tests()
|
player.py
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import copy
import logging
import os
import threading
import time
import xbmc
import xbmcgui
import mythbox.msg as m
import mythbox.ui.toolkit as toolkit
from mythbox.ui.toolkit import showPopup
from mythbox.util import formatSeconds, BoundedEvictingQueue, safe_str, catchall
from mythbox.mythtv.db import inject_db
log = logging.getLogger('mythbox.ui')
mlog = logging.getLogger('mythbox.method')
mythPlayer = None
# Interval in millis to sleep when we're waiting around for
# async xbmc events to complete
SLEEP_MILLIS = 250
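# Illustrative sketch (not part of the original MythBox module): the polling
# pattern used throughout this file -- check an asynchronous condition and sleep
# SLEEP_MILLIS between checks so the XBMC event loop is not starved. The helper
# name below is hypothetical; the real classes inline this loop where needed.
def _wait_until(condition, sleep_millis=SLEEP_MILLIS):
    """Block until condition() is truthy, polling every sleep_millis."""
    while not condition():
        xbmc.sleep(sleep_millis)
# Hypothetical usage: _wait_until(lambda: not player.isPlaying())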
class BasePlayer(xbmc.Player):
def __init__(self, *args, **kwargs):
xbmc.Player.__init__(self, *args, **kwargs)
self.active = True
self.tracker = PositionTracker(self)
def buildPlaybackUrl(self):
raise Exception('Abstract method')
def buildPlayList(self):
raise Exception('Abstract method')
def playRecording(self, commSkipper):
raise Exception('Abstract method')
@catchall
def onPlayBackStarted(self):
if self.active:
log.debug('> base:onPlayBackStarted %s' % self)
for target in (self.bookmarker, self.tracker, self.commSkipper):
try:
target.onPlayBackStarted()
except:
log.exception('onPlayBackStarted')
log.debug('< base:onPlayBackStarted %s' % self)
def onPlayBackStopped(self):
if self.active:
self.active = False
log.debug('> onPlayBackStopped')
for target in (self.tracker, self.commSkipper, self.bookmarker):
try:
target.onPlayBackStopped()
except:
log.exception('onPlayBackStopped')
log.debug('< onPlayBackStopped')
def onPlayBackEnded(self):
if self.active:
self.active = False
log.debug('> onPlayBackEnded')
for target in (self.tracker, self.commSkipper, self.bookmarker):
try:
target.onPlayBackEnded()
except:
log.exception('onPlayBackStopped')
log.debug('< onPlayBackEnded')
class MountedPlayer(BasePlayer):
'''Plays mythtv recordings with support for bookmarks, commercial skipping, etc'''
def __init__(self, *args, **kwargs):
BasePlayer.__init__(self, *args, **kwargs)
[setattr(self,k,v) for k,v in kwargs.iteritems() if k in ('translator', 'mythThumbnailCache', 'program', 'platform')]
self.bookmarker = MythBookmarker(self, self.program, self.translator)
self._playbackCompletedLock = threading.Event()
self._playbackCompletedLock.clear()
def buildPlaybackUrl(self):
return self.program.getLocalPath()
def playRecording(self, commSkipper):
"""
Plays the given program. Blocks until playback is stopped or until the
end of the recording is reached
"""
mlog.debug('> playRecording(%s)' % safe_str(self.program.title()))
assert not self.isPlaying(), 'Player is already playing a video'
self.commSkipper = commSkipper
self.play(self.buildPlaybackUrl(), self.buildPlayList(), windowed=False)
self.waitForPlaybackCompleted()
self.active = False
mlog.debug('< playRecording(...)')
# Callbacks ---------------------------------------------------------------
@catchall
def onPlayBackStopped(self):
if self.active:
try:
super(MountedPlayer, self).onPlayBackStopped()
finally:
self._playbackCompletedLock.set()
@catchall
def onPlayBackEnded(self):
if self.active:
try:
super(MountedPlayer, self).onPlayBackEnded()
finally:
self._playbackCompletedLock.set()
# Private -----------------------------------------------------------------
def waitForPlaybackCompleted(self):
while not self._playbackCompletedLock.isSet():
#log.debug('Waiting for playback completed...')
xbmc.sleep(SLEEP_MILLIS)
def buildPlayList(self):
mlog.debug("> _buildPlayList")
playlistItem = xbmcgui.ListItem()
title = self.program.fullTitle()
comms = self.program.getCommercials()
if len(comms) > 0:
title += '(%s breaks - %s)' % (len(comms), ', '.join(map(lambda c: formatSeconds(c.start), comms)))
playlistItem.setInfo(
"video", {
"Genre" : self.program.category(),
"Studio" : self.program.formattedChannel(),
"Title" : title,
"Plot" : self.program.formattedDescription()
})
# TODO: Set start offset if a comm break starts at 0.0
# playlistItem.setProperty('StartOffset', '256.4')
mlog.debug("< _buildPlayList")
return playlistItem
class StreamingPlayer(BasePlayer):
"""Use xbmcs built in myth support to stream the recording over the network."""
def __init__(self, *args, **kwargs):
BasePlayer.__init__(self, *args, **kwargs)
[setattr(self,k,v) for k,v in kwargs.iteritems() if k in ('settings', 'translator', 'mythThumbnailCache', 'program', 'platform')]
self.bookmarker = MythBookmarker(self, self.program, self.translator)
@inject_db
def buildPlaybackUrl(self):
backend = self.db().toBackend(self.program.hostname())
# myth://dbuser:dbpassword@mythbackend_hostname:mythbackend_port/recordings/filename.mpg
url = 'myth://%s:%s@%s:%s/recordings/%s' % (
self.settings.get('mysql_database'),
self.settings.get('mysql_password'),
backend.ipAddress,
backend.port,
self.program.getBareFilename())
log.debug('Playback url: %s' % url)
return url
def playRecording(self, commSkipper):
"""
Plays the given program. Blocks until playback is stopped or until the
end of the recording is reached
"""
mlog.debug('> playRecording %s' % safe_str(self.program.title()))
assert not self.isPlaying(), 'Player is already playing a video'
self.commSkipper = commSkipper
# extract recording's framerate from xbmc.log and inject into bookmarker
#from mythbox.log import LogScraper
#logtail = LogScraper(self.platform.getXbmcLog())
#worker = logtail.matchLineAsync("fps:", timeout=30, callback=self.bookmarker.onFPS)
self.play(self.buildPlaybackUrl(), self.buildPlayList())
#worker.join()
#self._waitForPlaybackCompleted()
#self.active = False
mlog.debug('< playRecording')
def buildPlayList(self):
playlistItem = xbmcgui.ListItem()
comms = self.program.getCommercials()
title = self.program.fullTitle()
if len(comms) > 0:
# times are invalid when streaming so only show cardinality
title += u' (%d breaks)' % len(comms)
playlistItem.setInfo(
"video", {
"Genre" : self.program.category(),
"Studio" : self.program.formattedChannel(),
"Title" : title,
"Plot" : self.program.formattedDescription()
})
return playlistItem
class Bookmarker(object):
pass
class XbmcBookmarker(Bookmarker):
'''When using a myth:// style URL for playback, defer to XBMC's built-in
resume from last position functionality'''
def __init__(self, *args, **kwargs):
pass
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
class MythBookmarker(Bookmarker):
'''Mimics XBMC video player's builtin auto resume functionality'''
def __init__(self, player, program, translator):
self.player = player
self.program = program
self.translator = translator
#self.fps = None
def onPlayBackStarted(self):
self._resumeFromBookmark()
def onPlayBackStopped(self):
self._saveLastPositionAsBookmark()
def onPlayBackEnded(self):
self._clearBookmark()
# @catchall
# def onFPS(self, line):
# log.debug('onFPS: %s' % line)
# if line is not None:
# log.debug('onFPS: line not none')
# words = line.split()
# tagIndex = words.index('fps:')
# self.fps = float(words[tagIndex+1].strip(','))
# self.program.setFPS(self.fps)
# log.debug('fps = %s' % self.fps)
# else:
# log.debug('onFPS: line is none')
# self.fps = 0.0
# #if log.isEnabledFor(logging.DEBUG):
# # showPopup('FPS', 'FPS %s' % self.fps)
def _clearBookmark(self):
if self.program.isBookmarked():
self.program.setBookmark(0.0)
def _resumeFromBookmark(self):
log.debug('bookmarker : before wait for gotFPS')
# # wait for fps to be set by log scraper for a max of 10 seconds
# cnt = 0
# while self.fps is None and cnt < 100:
# time.sleep(0.1)
# cnt += 1
#
# if self.fps is None:
# log.warn('Timed out waiting for fps to be set on bookmarker')
# else:
# log.debug('bookmarker : after wait for gotFPS')
bookmarkSecs = self.program.getBookmark()
if bookmarkSecs > 0 and bookmarkSecs < (self.program.getDuration() * 60):
fb = formatSeconds(bookmarkSecs)
log.debug('Resuming recording at bookmarked position of %s' % fb)
showPopup(self.program.title(), self.translator.get(m.RESUMING_AT) % fb)
self.player.seekTime(bookmarkSecs)
while self.player.getTime() < bookmarkSecs:
log.debug('Waiting for player time %s to seek past bookmark of %s' %(formatSeconds(self.player.getTime()), fb))
xbmc.sleep(SLEEP_MILLIS)
else:
log.debug('Recording has no bookmark or bookmark exceeds program length')
def _saveLastPositionAsBookmark(self):
lastPos = self.player.tracker.getLastPosition()
log.debug('Setting bookmark on %s to %s' %(safe_str(self.program.title()), formatSeconds(lastPos)))
try:
self.program.setBookmark(lastPos)
except:
log.exception('_saveLastPositionAsBookmark catchall')
class PositionTracker(object):
"""
Tracks the last position of the player. This is necessary because
Player.getTime() is not valid after the callback to
Player.onPlayBackStopped() has completed.
"""
HISTORY_SECS = 5 # Number of seconds of history to keep around
def __init__(self, player):
self._player = player
self._lastPos = 0.0
self._tracker = BoundedEvictingQueue((1000/SLEEP_MILLIS) * self.HISTORY_SECS)
self._history = []
def onPlayBackStarted(self):
log.debug('Starting position tracker...')
self._tracker = threading.Thread(
name='Position Tracker',
target = self._trackPosition)
self._tracker.start()
def onPlayBackStopped(self):
if self._tracker.isAlive():
log.debug('Position tracker stop called. Still alive = %s' % self._tracker.isAlive())
else:
log.debug('Position tracker thread already dead.')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def getHistory(self, howFarBack):
"""Returns a list of TrackerSamples from 'howFarBack' seconds ago."""
endPos = self._lastPos
startPos = endPos - howFarBack
slice = []
for sample in self._history:
if startPos <= sample.pos and sample.pos <= endPos:
slice.append(sample)
log.debug('Tracker history for %s secs = [%s] %s' % (howFarBack, len(slice), slice))
return slice
def getLastPosition(self):
return self._lastPos
def _trackPosition(self):
"""Method run in a separate thread. Tracks last position of player as long as it is playing"""
try:
while self._player.isPlaying():
self._lastPos = self._player.getTime()
self._history.append(TrackerSample(time.time(), self._lastPos))
#log.debug('Tracker time = %s' % self._lastPos)
xbmc.sleep(SLEEP_MILLIS)
log.debug('Position tracker thread exiting with lastPos = %s' % self.getLastPosition())
except:
log.exception('_trackPosition catchall')
class TrackerSample(object):
def __init__(self, time, pos):
self.time = time
self.pos = pos
def __repr__(self):
return 'Sample {time = %s, pos = %s}' % (self.time, self.pos)
class ICommercialSkipper(object):
"""Common interface for commercial skipping implementations."""
def __init__(self, player, program, translator):
self.player = player
self.program = program
self.translator = translator
def onPlayBackStarted(self):
raise NotImplementedError, 'Abstract base class'
def onPlayBackStopped(self):
raise NotImplementedError, 'Abstract base class'
def onPlayBackEnded(self):
raise NotImplementedError, 'Abstract base class'
class NoOpCommercialSkipper(ICommercialSkipper):
def __init__(self, player=None, program=None, translator=None):
ICommercialSkipper.__init__(self, player, program, translator)
def onPlayBackStarted(self):
pass
def onPlayBackStopped(self):
pass
def onPlayBackEnded(self):
pass
class TrackingCommercialSkipper(ICommercialSkipper):
"""
Commercial skipper that monitors the position of the currently playing file
and skips commercials accordingly.
"""
def __init__(self, player, program, translator):
ICommercialSkipper.__init__(self, player, program, translator)
def onPlayBackStarted(self):
log.debug('program in skipper = %s' % safe_str(self.program.title()))
# don't want changes to commbreak.skipped to stick beyond the scope of
# this player instance so use a deepcopy
self._breaks = copy.deepcopy(self.program.getCommercials())
# Has a value when video position falls in a comm break
self._currentBreak = None
for b in self._breaks:
log.debug('break = %s' % b)
self._skipper = threading.Thread(name='Tracking Commercial Skipper', target = self._trackCommercials)
self._skipper.start()
def onPlayBackStopped(self):
if self._skipper.isAlive():
log.debug('Commercial tracker stop called. Still alive = %s' % self._skipper.isAlive())
else:
log.debug('Commercial tracker thread already dead')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def _isInBreak(self, pos):
for b in self._breaks:
if b.isDuring(pos):
self._currentBreak = b
return True
self._currentBreak = None
return False
def _trackCommercials(self):
"""Method run in a separate thread to skip over commercials"""
try:
if len(self._breaks) == 0:
log.debug('Recording %s has no comm breaks, exiting comm tracker' % safe_str(self.program.title()))
return
while self.player.isPlaying():
pos = self.player.getTime()
if self._isInBreak(pos) and not self._currentBreak.skipped:
log.debug('entered comm break = %s' % self._currentBreak)
if self._isCloseToStartOfCommercial(pos) and not self._wasUserSkippingAround(pos):
log.debug('Comm skip activated!')
showPopup(self.program.title(), self.translator.get(m.SKIPPING_COMMERCIAL) % formatSeconds(self._currentBreak.duration()), 3000)
self.player.seekTime(self._currentBreak.end)
self._waitForPlayerToPassCommercialBreak()
self._currentBreak.skipped = True
if self._landedInCommercial(pos):
log.debug("Landed in comm break and want to skip forward")
showPopup(self.program.title(), self.translator.get(m.FORWARDING_THROUGH) % formatSeconds(self._currentBreak.duration()), 3000)
self.player.seekTime(self._currentBreak.end)
self._waitForPlayerToPassCommercialBreak()
self._currentBreak.skipped = True
xbmc.sleep(SLEEP_MILLIS)
log.debug('Commercial tracker thread exiting')
except:
log.exception('_trackCommercials catchall')
def _landedInCommercial(self, currPos):
#samplesInCommercial = 4 # In commercial for 2 seconds
secondsToSample = 4
samples = self.player.tracker.getHistory(secondsToSample)
samplesInCommercial = len(filter(lambda x: self._currentBreak.isDuring(x.pos), samples))
log.debug('Samples in commercial = %d' % samplesInCommercial)
return samplesInCommercial > 8 and samplesInCommercial < 12
def _wasUserSkippingAround(self, currPos):
"""
Check the last 2 seconds of history for the number of samples.
A high number of samples indicates that the user was probably
not skipping around in the video, hence the comm skip would
be a good thing.
"""
wasSkipping = False
samplePeriodSecs = 2 # TODO: Pass in as param to method call
# If currPos is too close to the start of the video..assume not
# skipping around
if currPos > samplePeriodSecs:
requiredSamples = 6 # TODO: Derive as percentage instead of hardcoding
numSamples = len(self.player.tracker.getHistory(samplePeriodSecs))
log.debug('Samples in last %s seconds = %s' %(samplePeriodSecs, numSamples))
wasSkipping = numSamples < requiredSamples
log.debug('User was skipping around = %s' % wasSkipping)
return wasSkipping
def _isCloseToStartOfCommercial(self, currPos):
"""
Check that the current pos is in close proximity to the start of the
commercial break. Assumes that the comm break is skipped only if the user
played directly into the commercial vs. landing inside the commercial
via ffwd, rewind, etc.
"""
windowStart = self._currentBreak.start - 1
windowEnd = self._currentBreak.start + 2
isClose = currPos >= windowStart and currPos <= windowEnd
log.debug('User close to start of comm break = %s' % isClose)
return isClose
def _waitForPlayerToPassCommercialBreak(self):
# TODO: What if user stops playing while in this loop? Add isPlaying() to loop invariant
# wait for player pos to pass current break
while self._currentBreak.isDuring(self.player.getTime()):
xbmc.sleep(SLEEP_MILLIS)
|
test_rpc.py
|
import os
import time
import socket
import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal
from utils import reset_envs
if os.name != 'nt':
import fcntl
import struct
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231
TENSOR = F.zeros((10, 10), F.int64, F.cpu())
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address and port separated by a space, e.g., '192.168.8.12 50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
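# Illustrative sketch (not part of the original test file): the space-separated
# "ip port" string above is written, one line per server, into the ip_config
# files that the tests below pass to dgl.distributed.start_server /
# connect_to_server. The helper name is hypothetical.
def _write_ip_config_example(path, num_servers=1):
    """Mirror of what the tests below do inline when creating ip_config files."""
    with open(path, "w") as f:
        for _ in range(num_servers):
            f.write(get_local_usable_addr() + '\n')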
def foo(x, y):
assert x == 123
assert y == "abc"
class MyRequest(dgl.distributed.Request):
def __init__(self):
self.x = 123
self.y = "abc"
self.z = F.randn((3, 4))
self.foo = foo
def __getstate__(self):
return self.x, self.y, self.z, self.foo
def __setstate__(self, state):
self.x, self.y, self.z, self.foo = state
def process_request(self, server_state):
pass
class MyResponse(dgl.distributed.Response):
def __init__(self):
self.x = 432
def __getstate__(self):
return self.x
def __setstate__(self, state):
self.x = state
def simple_func(tensor):
return tensor
class HelloResponse(dgl.distributed.Response):
def __init__(self, hello_str, integer, tensor):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
def __getstate__(self):
return self.hello_str, self.integer, self.tensor
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor = state
class HelloRequest(dgl.distributed.Request):
def __init__(self, hello_str, integer, tensor, func):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
self.func = func
def __getstate__(self):
return self.hello_str, self.integer, self.tensor, self.func
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor, self.func = state
def process_request(self, server_state):
assert self.hello_str == STR
assert self.integer == INTEGER
new_tensor = self.func(self.tensor)
res = HelloResponse(self.hello_str, self.integer, new_tensor)
return res
def start_server(num_clients, ip_config, server_id=0):
print("Sleep 2 seconds to test client re-connect.")
time.sleep(2)
server_state = dgl.distributed.ServerState(None, local_g=None, partition_book=None)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
print("Start server {}".format(server_id))
dgl.distributed.start_server(server_id=server_id,
ip_config=ip_config,
num_servers=1,
num_clients=num_clients,
server_state=server_state)
def start_client(ip_config):
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.connect_to_server(ip_config=ip_config, num_servers=1)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
# test send and recv
dgl.distributed.send_request(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test send_request_to_machine
dgl.distributed.send_request_to_machine(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call_to_machine
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call_to_machine(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
def test_serialize():
reset_envs()
os.environ['DGL_DIST_MODE'] = 'distributed'
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
SERVICE_ID = 12345
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
req1 = deserialize_from_payload(MyRequest, data, tensors)
req1.foo(req1.x, req1.y)
assert req.x == req1.x
assert req.y == req1.y
assert F.array_equal(req.z, req1.z)
res = MyResponse()
data, tensors = serialize_to_payload(res)
res1 = deserialize_from_payload(MyResponse, data, tensors)
assert res.x == res1.x
def test_rpc_msg():
reset_envs()
os.environ['DGL_DIST_MODE'] = 'distributed'
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
SERVICE_ID = 32452
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
assert rpcmsg.service_id == SERVICE_ID
assert rpcmsg.msg_seq == 23
assert rpcmsg.client_id == 0
assert rpcmsg.server_id == 1
assert len(rpcmsg.data) == len(data)
assert len(rpcmsg.tensors) == 1
assert F.array_equal(rpcmsg.tensors[0], req.z)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_rpc():
reset_envs()
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config.txt"))
pclient = ctx.Process(target=start_client, args=("rpc_ip_config.txt",))
pserver.start()
pclient.start()
pserver.join()
pclient.join()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_client():
reset_envs()
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config_mul_client.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(10, "rpc_ip_config_mul_client.txt"))
pclient_list = []
for i in range(10):
pclient = ctx.Process(target=start_client, args=("rpc_ip_config_mul_client.txt",))
pclient_list.append(pclient)
pserver.start()
for i in range(10):
pclient_list[i].start()
for i in range(10):
pclient_list[i].join()
pserver.join()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_thread_rpc():
reset_envs()
os.environ['DGL_DIST_MODE'] = 'distributed'
ip_config = open("rpc_ip_config_multithread.txt", "w")
num_servers = 2
for _ in range(num_servers): # one address line per server
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
ctx = mp.get_context('spawn')
pserver_list = []
for i in range(num_servers):
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config_multithread.txt", i))
pserver.start()
pserver_list.append(pserver)
def start_client_multithread(ip_config):
import threading
dgl.distributed.connect_to_server(ip_config=ip_config, num_servers=1)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
dgl.distributed.send_request(0, req)
def subthread_call(server_id):
req = HelloRequest(STR, INTEGER, TENSOR + server_id, simple_func)
dgl.distributed.send_request(server_id, req)
subthread = threading.Thread(target=subthread_call, args=(1,))
subthread.start()
subthread.join()
res0 = dgl.distributed.recv_response()
res1 = dgl.distributed.recv_response()
assert_array_equal(F.asnumpy(res0.tensor), F.asnumpy(TENSOR))
assert_array_equal(F.asnumpy(res1.tensor), F.asnumpy(TENSOR+1))
dgl.distributed.exit_client()
start_client_multithread("rpc_ip_config_multithread.txt")
pserver.join()
if __name__ == '__main__':
test_serialize()
test_rpc_msg()
test_rpc()
test_multi_client()
test_multi_thread_rpc()
|
main.py
|
from tkinter.filedialog import askopenfilename, askdirectory, asksaveasfilename
import tkinter
from tkinter import messagebox
from tkinter.ttk import Progressbar, Style
from PIL import ImageTk, Image
import time
import os
import sys
import subprocess
import threading
try:
from src.docs import config
except Exception as e:
print(str(e))
sys.exit(0)
class Gui:
DARK_THEME = {
'PRIMARY': '#222',
'SECONDARY': '#333',
'TEXT': '#fff',
'SEC_TEXT': '#4d5c69'
}
LIGHT_THEME = {
'PRIMARY': '#fff',
'SECONDARY': '#eee',
'TEXT': '#222',
'SEC_TEXT': '#a9b5c0'
}
APP_NAME = 'Timefy'
def __init__(self):
self.THEME = self.DARK_THEME if config.DARK_THEME else self.LIGHT_THEME
gui_dir = os.path.dirname(__file__)
assets_dir = gui_dir + './../assets/'
self.top = tkinter.Tk()
self.top.title(self.APP_NAME)
self.top.geometry("500x175")
self.top.resizable(False, False)
self.top.iconbitmap(default=assets_dir + 'favicon.ico')
frame = tkinter.Frame(self.top, padx=10, pady=10, bg=self.THEME['PRIMARY'])
frame.pack(fill=tkinter.BOTH, expand=True)
searchImg = ImageTk.PhotoImage(Image.open(assets_dir + 'search.png').resize((20, 20), Image.ANTIALIAS))
sourceButton = tkinter.Button(frame, image=searchImg, padx=0, relief=tkinter.FLAT, command=self.__load_source, bg=self.THEME['PRIMARY'])
sourceButton.image = searchImg
sourceButton.grid(column=2, row=0, padx=(5, 5), pady=(5, 0))
outputButton = tkinter.Button(frame, image=searchImg, padx=0, relief=tkinter.FLAT, command=self.__load_output, bg=self.THEME['PRIMARY'])
outputButton.grid(column=2, row=1, padx=(5, 5), pady=(5, 0))
sourceLabel = tkinter.Label(frame, text='Source', bg=self.THEME['PRIMARY'], fg=self.THEME['TEXT'], width=8)
sourceLabel.grid(row=0, column=0)
self.sourceValue = tkinter.StringVar()
source = tkinter.Entry(frame, bg=self.THEME['SECONDARY'], textvariable=self.sourceValue, fg=self.THEME['TEXT'], width=60, borderwidth=5, relief=tkinter.FLAT, state='disabled', disabledbackground=self.THEME['SECONDARY'], disabledforeground=self.THEME['TEXT'])
source.grid(row=0, column=1, pady=(6, 0))
outputLabel = tkinter.Label(frame, text='Output', bg=self.THEME['PRIMARY'], fg=self.THEME['TEXT'], width=8)
outputLabel.grid(column=0, row=1)
self.outputValue = tkinter.StringVar()
output = tkinter.Entry(frame, bg=self.THEME['SECONDARY'], textvariable=self.outputValue, fg=self.THEME['TEXT'], width=60, borderwidth=5, relief=tkinter.FLAT, state='disabled', disabledbackground=self.THEME['SECONDARY'], disabledforeground=self.THEME['TEXT'])
output.grid(row=1, column=1, pady=(6, 0))
generate = tkinter.Button(frame, text='GENERATE', bg='#3742fa', fg='#fff', bd=0, padx=15, pady=5, command=self.__gen)
generate.grid(row=2, column=1, columnspan=2, sticky=tkinter.E, pady=(20, 0))
self.should_append = tkinter.IntVar()
# append = tkinter.Checkbutton(frame, text="Append", selectcolor=self.THEME['SECONDARY'], relief=tkinter.FLAT, onvalue=1, offvalue=0, variable=self.should_append, bg=self.THEME['PRIMARY'], activebackground=self.THEME['PRIMARY'], activeforeground=self.THEME['TEXT'], fg=self.THEME['TEXT'])
# append.grid(row=2, column=1, pady=(20, 0), padx=(175, 0))
reset = tkinter.Button(frame, text='RESET', bg=self.THEME['SECONDARY'], fg=self.THEME['TEXT'], padx=15, pady=5, bd=0, command=self.reset)
reset.grid(row=2, column=1, pady=(20, 0), padx=(175, 0))
github = tkinter.Label(frame, text='github.com/rodchenk', bg=self.THEME['PRIMARY'], fg=self.THEME['SEC_TEXT'], pady=5)
github.grid(row=2, column=0, columnspan=2, sticky=tkinter.W, pady=(20, 0), padx=10)
s = Style()
s.theme_use("default")
s.configure("TProgressbar", thickness=5, background='#26A65B', troughrelief='flat')
self.progress = Progressbar(frame, orient=tkinter.HORIZONTAL, length=465, mode='determinate', style="TProgressbar")
def run(self):
self.top.mainloop()
def reset(self):
self.outputValue.set('')
self.sourceValue.set('')
def __show_progress(self):
self.progress.grid(row=3, column=0, columnspan=3, pady=(25, 0))
for x in range(51):
self.progress['value'] = 2 * x
self.top.update_idletasks()
time.sleep(0.01)
def __hide_progress(self):
self.progress.grid_forget()
def __gen(self):
source, output, append = self.sourceValue.get(), self.outputValue.get(), self.should_append.get() == 1
if not source or not output:
return
# self.__show_progress()
threading.Thread(target=self.__show_progress).start()
result = self.__call_script(source, output)
if result == 0:
_open = messagebox.askyesno('Success', 'Report has been generated. Do you want to open it?')
if _open:
subprocess.Popen(output, shell=True, stdout = subprocess.PIPE)
else:
messagebox.showerror('Error', 'An error has occurred')
self.__hide_progress()
def __call_script(self, source, output):
cli_path = os.path.dirname(__file__) + './../main.py'
command = 'python %s -s %s -o %s' % (cli_path, source, output)
p = subprocess.Popen(command, shell=True, stdout = subprocess.PIPE)
stdout, stderr = p.communicate()
return p.returncode
def __load_source(self):
dname = askdirectory()
self.sourceValue.set(dname)
def __load_output(self):
fname = asksaveasfilename(filetypes=(("CSV Files", "*.csv;"), ("All files", "*.*") ))
if not fname:
return
if not fname.endswith('.csv'):
if fname[-1:] == '.':
fname = fname[:-1]
fname += '.csv'
self.outputValue.set(fname)
if __name__ == '__main__':
try:
Gui().run()
except IOError as e:
print(str(e))
|
server_launcher.py
|
#!/usr/bin/python
import os
import shutil
import time
from conans import SERVER_CAPABILITIES
from conans.paths.simple_paths import SimplePaths
from conans.server.conf import get_server_store
from conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager
from conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager
from conans.server.migrate import migrate_and_get_server_config
from conans.server.rest.server import ConanServer
from conans.server.service.authorize import BasicAuthenticator, BasicAuthorizer
from conans.test.utils.test_files import temp_folder
from conans.util.files import mkdir
from conans.util.log import logger
TESTING_REMOTE_PRIVATE_USER = "private_user"
TESTING_REMOTE_PRIVATE_PASS = "private_pass"
class TestServerLauncher(object):
port = 0
def __init__(self, base_path=None, read_permissions=None,
write_permissions=None, users=None, base_url=None, plugins=None,
server_capabilities=None):
plugins = plugins or []
if not base_path:
base_path = temp_folder()
if not os.path.exists(base_path):
raise Exception("Base path not exist! %s")
# Define storage_folder; if not set, it will be read from the conf file and
# pointed to the real user home
self.storage_folder = os.path.join(base_path, ".conan_server", "data")
mkdir(self.storage_folder)
server_config = migrate_and_get_server_config(base_path, self.storage_folder)
if server_capabilities is None:
server_capabilities = set(SERVER_CAPABILITIES)
if TestServerLauncher.port == 0:
TestServerLauncher.port = server_config.port
# Encode and Decode signature for Upload and Download service
updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,
server_config.authorize_timeout)
base_url = base_url or server_config.public_url
self.server_store = get_server_store(server_config.disk_storage_path,
base_url, updown_auth_manager)
# Prepare some test users
if not read_permissions:
read_permissions = server_config.read_permissions
read_permissions.append(("private_library/1.0.0@private_user/testing", "*"))
read_permissions.append(("*/*@*/*", "*"))
if not write_permissions:
write_permissions = server_config.write_permissions
if not users:
users = dict(server_config.users)
users[TESTING_REMOTE_PRIVATE_USER] = TESTING_REMOTE_PRIVATE_PASS
authorizer = BasicAuthorizer(read_permissions, write_permissions)
authenticator = BasicAuthenticator(users)
credentials_manager = JWTCredentialsManager(server_config.jwt_secret,
server_config.jwt_expire_time)
logger.debug("Storage path: %s" % self.storage_folder)
self.port = TestServerLauncher.port
self.paths = SimplePaths(server_config.disk_storage_path)
self.ra = ConanServer(self.port, credentials_manager, updown_auth_manager,
authorizer, authenticator, self.server_store,
server_capabilities)
for plugin in plugins:
self.ra.api_v1.install(plugin)
self.ra.api_v2.install(plugin)
def start(self, daemon=True):
"""from multiprocessing import Process
self.p1 = Process(target=ra.run, kwargs={"host": "0.0.0.0"})
self.p1.start()
self.p1"""
import threading
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
self.t1 = StoppableThread(target=self.ra.run, kwargs={"host": "0.0.0.0", "quiet": True})
self.t1.daemon = daemon
self.t1.start()
time.sleep(1)
def stop(self):
self.ra.root_app.close()
self.t1.stop()
def clean(self):
if os.path.exists(self.storage_folder):
try:
shutil.rmtree(self.storage_folder)
except:
print("Can't clean the test server data, probably a server process is still opened")
if __name__ == "__main__":
server = TestServerLauncher()
server.start(daemon=False)
|
test_SeqIO_index.py
|
# Copyright 2009-2017 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for Bio.SeqIO.index(...) and index_db() functions."""
try:
import sqlite3
except ImportError:
# Try to run what tests we can on Jython
# where we don't expect this to be installed.
sqlite3 = None
import sys
import os
import unittest
import tempfile
import threading
import gzip
import warnings
from io import BytesIO
from io import StringIO
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.SeqIO._index import _FormatToRandomAccess
from Bio.Alphabet import generic_protein, generic_nucleotide, generic_dna
from seq_tests_common import compare_record
from Bio import BiopythonParserWarning
from Bio import MissingPythonDependencyError
try:
from test_bgzf import _have_bug17666
do_bgzf = _have_bug17666()
except MissingPythonDependencyError:
do_bgzf = False
CUR_DIR = os.getcwd()
def add_prefix(key):
"""Sample key_function for testing index code."""
return "id_" + key
def gzip_open(filename, format):
    # At the time of writing, gzip.open(filename, mode) under Python 3.2.2
    # insists on returning byte strings (i.e. binary mode)
# See http://bugs.python.org/issue13989
if sys.version_info[0] < 3 or format in SeqIO._BinaryFormats:
return gzip.open(filename)
handle = gzip.open(filename)
data = handle.read() # bytes!
handle.close()
return StringIO(data.decode())
if sqlite3:
def raw_filenames(index_filename):
"""Open SQLite index and extract filenames (as is).
Returns a 2-tuple, holding a list of strings, and the value
of the meta_data.filenames_relative_to_index (or None).
"""
con = sqlite3.dbapi2.connect(index_filename)
filenames = [row[0] for row in
con.execute("SELECT name FROM file_data "
"ORDER BY file_number;").fetchall()]
try:
filenames_relative_to_index, = con.execute(
"SELECT value FROM meta_data WHERE key=?;",
("filenames_relative_to_index",)).fetchone()
filenames_relative_to_index = (filenames_relative_to_index.upper() == "TRUE")
except TypeError:
filenames_relative_to_index = None
con.close()
return filenames, filenames_relative_to_index
class OldIndexTest(unittest.TestCase):
"""Testing a pre-built index (make sure cross platform etc).
>>> from Bio import SeqIO
>>> d = SeqIO.index_db("triple_sff.idx", ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"], "sff")
>>> len(d)
54
"""
def setUp(self):
os.chdir(CUR_DIR)
def tearDown(self):
os.chdir(CUR_DIR)
def test_old(self):
"""Load existing index with no options (from parent directory)."""
d = SeqIO.index_db("Roche/triple_sff.idx")
self.assertEqual(54, len(d))
self.assertRaises(FileNotFoundError, d.get_raw, "alpha")
def test_old_check_same_thread(self):
"""Setting check_same_thread to False doesn't raise an exception."""
d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")
def reader_thread():
try:
d["alpha"]
except sqlite3.ProgrammingError:
self.fail("Raised sqlite3.ProgrammingError in violation of check_same_thread=False")
reader = threading.Thread(target=reader_thread)
reader.start()
reader.join()
def test_old_rel(self):
"""Load existing index (with relative paths) with no options (from parent directory)."""
d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_contents(self):
"""Check actual filenames in existing indexes."""
filenames, flag = raw_filenames("Roche/triple_sff.idx")
self.assertEqual(flag, None)
self.assertEqual(filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"])
filenames, flag = raw_filenames("Roche/triple_sff_rel_paths.idx")
self.assertEqual(flag, True)
self.assertEqual(filenames, ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"])
def test_old_same_dir(self):
"""Load existing index with no options (from same directory)."""
os.chdir("Roche")
d = SeqIO.index_db("triple_sff.idx")
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_same_dir_rel(self):
"""Load existing index (with relative paths) with no options (from same directory)."""
os.chdir("Roche")
d = SeqIO.index_db("triple_sff_rel_paths.idx")
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_format(self):
"""Load existing index with correct format."""
d = SeqIO.index_db("Roche/triple_sff.idx", format="sff")
self.assertEqual(54, len(d))
def test_old_format_wrong(self):
"""Load existing index with wrong format."""
self.assertRaises(ValueError, SeqIO.index_db,
"Roche/triple_sff.idx", format="fasta")
def test_old_files(self):
"""Load existing index with correct files (from parent directory)."""
d = SeqIO.index_db("Roche/triple_sff.idx",
["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"])
self.assertEqual(54, len(d))
self.assertRaises(FileNotFoundError, d.get_raw, "alpha")
def test_old_files_same_dir(self):
"""Load existing index with correct files (from same directory)."""
os.chdir("Roche")
d = SeqIO.index_db("triple_sff.idx",
["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"])
self.assertEqual(54, len(d))
self.assertEqual(395, len(d["alpha"]))
def test_old_files_wrong(self):
"""Load existing index with wrong files."""
self.assertRaises(ValueError, SeqIO.index_db,
"Roche/triple_sff.idx", ["a.sff", "b.sff", "c.sff"])
def test_old_files_wrong2(self):
"""Load existing index with wrong number of files."""
self.assertRaises(ValueError, SeqIO.index_db,
"Roche/triple_sff.idx",
["E3MFGYR02_no_manifest.sff", "greek.sff"])
class NewIndexTest(unittest.TestCase):
"""Check paths etc in newly built index."""
def setUp(self):
os.chdir(CUR_DIR)
def tearDown(self):
os.chdir(CUR_DIR)
for i in ["temp.idx", "Roche/temp.idx"]:
if os.path.isfile(i):
os.remove(i)
def check(self, index_file, sff_files, expt_sff_files):
if os.path.isfile(index_file):
os.remove(index_file)
# Build index...
d = SeqIO.index_db(index_file, sff_files, "sff")
self.assertEqual(395, len(d["alpha"]))
d._con.close() # hack for PyPy
d.close()
self.assertEqual([os.path.abspath(f) for f in sff_files],
[os.path.abspath(f) for f in d._filenames])
# Now directly check the filenames inside the SQLite index:
filenames, flag = raw_filenames(index_file)
self.assertEqual(flag, True)
self.assertEqual(filenames, expt_sff_files)
# Load index...
d = SeqIO.index_db(index_file, sff_files)
self.assertEqual(395, len(d["alpha"]))
d._con.close() # hack for PyPy
d.close()
self.assertEqual([os.path.abspath(f) for f in sff_files], d._filenames)
os.remove(index_file)
def test_child_folder_rel(self):
"""Check relative links to child folder."""
        # Note we expect relative paths recorded with Unix slashes!
expt_sff_files = ["Roche/E3MFGYR02_no_manifest.sff",
"Roche/greek.sff",
"Roche/paired.sff"]
self.check("temp.idx", expt_sff_files, expt_sff_files)
# Here index is given as abs
self.check(os.path.abspath("temp.idx"),
["Roche/E3MFGYR02_no_manifest.sff",
os.path.abspath("Roche/greek.sff"),
"Roche/paired.sff"],
expt_sff_files)
# Here index is given as relative path
self.check("temp.idx",
["Roche/E3MFGYR02_no_manifest.sff",
os.path.abspath("Roche/greek.sff"),
"Roche/paired.sff"],
expt_sff_files)
def test_same_folder(self):
"""Check relative links in same folder."""
os.chdir("Roche")
expt_sff_files = ["E3MFGYR02_no_manifest.sff", "greek.sff", "paired.sff"]
# Here everything is relative,
self.check("temp.idx", expt_sff_files, expt_sff_files)
self.check(os.path.abspath("temp.idx"),
["E3MFGYR02_no_manifest.sff",
os.path.abspath("greek.sff"),
"../Roche/paired.sff"],
expt_sff_files)
self.check("temp.idx",
["E3MFGYR02_no_manifest.sff",
os.path.abspath("greek.sff"),
"../Roche/paired.sff"],
expt_sff_files)
self.check("../Roche/temp.idx",
["E3MFGYR02_no_manifest.sff",
os.path.abspath("greek.sff"),
"../Roche/paired.sff"],
expt_sff_files)
def test_some_abs(self):
"""Check absolute filenames in index.
Unless the repository and tests themselves are under the temp
directory (as detected by ``tempfile``), we expect the index to
use absolute filenames.
"""
h, t = tempfile.mkstemp(prefix="index_test_", suffix=".idx")
os.close(h)
os.remove(t)
abs_sff_files = [os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
os.path.abspath("Roche/greek.sff"),
os.path.abspath(os.path.join("Roche", "paired.sff"))]
if os.getcwd().startswith(os.path.dirname(t)):
# The tests are being run from within the temp directory,
# e.g. index filename /tmp/index_test_XYZ.idx
# and working directory of /tmp/biopython/Tests/
# This means the indexing will use a RELATIVE path
# e.g. biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
# not /tmp/biopython/Tests/Roche/E3MFGYR02_no_manifest.sff
expt_sff_files = [os.path.relpath(f, os.path.dirname(t))
for f in abs_sff_files]
else:
expt_sff_files = abs_sff_files
# Providing absolute paths...
self.check(t, abs_sff_files, expt_sff_files)
# Now try with mix of abs and relative paths...
self.check(t,
[os.path.abspath("Roche/E3MFGYR02_no_manifest.sff"),
os.path.join("Roche", "greek.sff"),
os.path.abspath("Roche/paired.sff")],
expt_sff_files)
class IndexDictTests(unittest.TestCase):
"""Cunning unit test where methods are added at run time."""
def setUp(self):
os.chdir(CUR_DIR)
h, self.index_tmp = tempfile.mkstemp("_idx.tmp")
os.close(h)
def tearDown(self):
os.chdir(CUR_DIR)
if os.path.isfile(self.index_tmp):
os.remove(self.index_tmp)
def simple_check(self, filename, format, alphabet, comp):
"""Check indexing (without a key function)."""
if comp:
h = gzip_open(filename, format)
id_list = [rec.id for rec in SeqIO.parse(h, format, alphabet)]
h.close()
else:
id_list = [rec.id for rec in SeqIO.parse(filename, format, alphabet)]
with warnings.catch_warnings():
if "_alt_index_" in filename:
# BiopythonParserWarning: Could not parse the SFF index:
# Unknown magic number b'.diy' in SFF index header:
# b'.diy1.00'
warnings.simplefilter("ignore", BiopythonParserWarning)
rec_dict = SeqIO.index(filename, format, alphabet)
self.check_dict_methods(rec_dict, id_list, id_list)
rec_dict.close()
del rec_dict
if not sqlite3:
return
# In memory,
# note here give filenames as list of strings
rec_dict = SeqIO.index_db(":memory:", [filename], format,
alphabet)
self.check_dict_methods(rec_dict, id_list, id_list)
rec_dict.close()
del rec_dict
# check error conditions
self.assertRaises(ValueError, SeqIO.index_db,
":memory:", format="dummy")
self.assertRaises(ValueError, SeqIO.index_db,
":memory:", filenames=["dummy"])
# Saving to file...
index_tmp = self.index_tmp
if os.path.isfile(index_tmp):
os.remove(index_tmp)
# To disk,
# note here we give the filename as a single string
        # to confirm that works too (convenience feature).
rec_dict = SeqIO.index_db(index_tmp, filename, format,
alphabet)
self.check_dict_methods(rec_dict, id_list, id_list)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
del rec_dict
# Now reload it...
rec_dict = SeqIO.index_db(index_tmp, [filename], format,
alphabet)
self.check_dict_methods(rec_dict, id_list, id_list)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
del rec_dict
# Now reload without passing filenames and format
# and switch directory to check paths still work
index_tmp = os.path.abspath(index_tmp)
os.chdir(os.path.dirname(filename))
rec_dict = SeqIO.index_db(index_tmp, alphabet=alphabet)
self.check_dict_methods(rec_dict, id_list, id_list)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
del rec_dict
os.remove(index_tmp)
def key_check(self, filename, format, alphabet, comp):
"""Check indexing with a key function."""
if comp:
h = gzip_open(filename, format)
id_list = [rec.id for rec in SeqIO.parse(h, format, alphabet)]
h.close()
else:
id_list = [rec.id for rec in SeqIO.parse(filename, format, alphabet)]
key_list = [add_prefix(id) for id in id_list]
with warnings.catch_warnings():
if "_alt_index_" in filename:
# BiopythonParserWarning: Could not parse the SFF index:
# Unknown magic number b'.diy' in SFF index header:
# b'.diy1.00'
warnings.simplefilter("ignore", BiopythonParserWarning)
rec_dict = SeqIO.index(filename, format, alphabet, add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list)
rec_dict.close()
del rec_dict
if not sqlite3:
return
# In memory,
rec_dict = SeqIO.index_db(":memory:", [filename], format, alphabet,
add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list)
# check error conditions
self.assertRaises(ValueError, SeqIO.index_db,
":memory:", format="dummy",
key_function=add_prefix)
self.assertRaises(ValueError, SeqIO.index_db,
":memory:", filenames=["dummy"],
key_function=add_prefix)
rec_dict.close()
del rec_dict
# Saving to file...
index_tmp = filename + ".key.idx"
if os.path.isfile(index_tmp):
os.remove(index_tmp)
rec_dict = SeqIO.index_db(index_tmp, [filename], format, alphabet,
add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
del rec_dict
# Now reload it...
rec_dict = SeqIO.index_db(index_tmp, [filename], format, alphabet,
add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
del rec_dict
# Now reload without passing filenames and format
rec_dict = SeqIO.index_db(index_tmp, alphabet=alphabet,
key_function=add_prefix)
self.check_dict_methods(rec_dict, key_list, id_list)
rec_dict.close()
rec_dict._con.close() # hack for PyPy
del rec_dict
os.remove(index_tmp)
# Done
def check_dict_methods(self, rec_dict, keys, ids):
self.assertEqual(set(keys), set(rec_dict))
# This is redundant, I just want to make sure len works:
self.assertEqual(len(keys), len(rec_dict))
# Make sure boolean evaluation works
self.assertEqual(bool(keys), bool(rec_dict))
for key, id in zip(keys, ids):
self.assertIn(key, rec_dict)
self.assertEqual(id, rec_dict[key].id)
self.assertEqual(id, rec_dict.get(key).id)
        # Check non-existent keys,
assert chr(0) not in keys, "Bad example in test"
try:
rec = rec_dict[chr(0)]
raise ValueError("Accessing a non-existent key should fail")
except KeyError:
pass
self.assertEqual(rec_dict.get(chr(0)), None)
self.assertEqual(rec_dict.get(chr(0), chr(1)), chr(1))
if hasattr(dict, "iteritems"):
# Python 2.x
for key, rec in rec_dict.items():
self.assertIn(key, keys)
self.assertTrue(isinstance(rec, SeqRecord))
self.assertIn(rec.id, ids)
else:
# Python 3
assert not hasattr(rec_dict, "iteritems")
for key, rec in rec_dict.items():
self.assertIn(key, keys)
self.assertTrue(isinstance(rec, SeqRecord))
self.assertIn(rec.id, ids)
for rec in rec_dict.values():
self.assertIn(key, keys)
self.assertTrue(isinstance(rec, SeqRecord))
self.assertIn(rec.id, ids)
# Check the following fail
self.assertRaises(NotImplementedError, rec_dict.popitem)
self.assertRaises(NotImplementedError, rec_dict.pop, chr(0))
self.assertRaises(NotImplementedError, rec_dict.pop, chr(0), chr(1))
self.assertRaises(NotImplementedError, rec_dict.clear)
self.assertRaises(NotImplementedError, rec_dict.__setitem__, "X", None)
self.assertRaises(NotImplementedError, rec_dict.copy)
self.assertRaises(NotImplementedError, rec_dict.fromkeys, [])
def get_raw_check(self, filename, format, alphabet, comp):
# Also checking the key_function here
if comp:
h = gzip.open(filename, "rb")
raw_file = h.read()
h.close()
h = gzip_open(filename, format)
id_list = [rec.id.lower() for rec in
SeqIO.parse(h, format, alphabet)]
h.close()
else:
h = open(filename, "rb")
raw_file = h.read()
h.close()
id_list = [rec.id.lower() for rec in
SeqIO.parse(filename, format, alphabet)]
if format in ["sff"]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonParserWarning)
rec_dict = SeqIO.index(filename, format, alphabet,
key_function=lambda x: x.lower()) # noqa: E731
if sqlite3:
rec_dict_db = SeqIO.index_db(":memory:", filename, format, alphabet,
key_function=lambda x: x.lower()) # noqa: E731
else:
rec_dict = SeqIO.index(filename, format, alphabet,
key_function=lambda x: x.lower()) # noqa: E731
if sqlite3:
rec_dict_db = SeqIO.index_db(":memory:", filename, format, alphabet,
key_function=lambda x: x.lower()) # noqa: E731
self.assertEqual(set(id_list), set(rec_dict))
if sqlite3:
self.assertEqual(set(id_list), set(rec_dict_db))
self.assertEqual(len(id_list), len(rec_dict))
for key in id_list:
self.assertIn(key, rec_dict)
self.assertEqual(key, rec_dict[key].id.lower())
self.assertEqual(key, rec_dict.get(key).id.lower())
raw = rec_dict.get_raw(key)
self.assertTrue(isinstance(raw, bytes),
"Didn't get bytes from %s get_raw" % format)
self.assertTrue(raw.strip())
self.assertIn(raw, raw_file)
if sqlite3:
raw_db = rec_dict_db.get_raw(key)
# Via index using format-specific get_raw which scans the file,
# Via index_db in general using raw length found when indexing.
self.assertEqual(raw, raw_db,
"index and index_db .get_raw() different for %s" % format)
rec1 = rec_dict[key]
# Following isn't very elegant, but it lets me test the
# __getitem__ SFF code is working.
if format in SeqIO._BinaryFormats:
handle = BytesIO(raw)
else:
handle = StringIO(raw.decode())
if format == "sff":
rec2 = SeqIO.SffIO._sff_read_seq_record(
handle,
rec_dict._proxy._flows_per_read,
rec_dict._proxy._flow_chars,
rec_dict._proxy._key_sequence,
rec_dict._proxy._alphabet,
trim=False)
elif format == "sff-trim":
rec2 = SeqIO.SffIO._sff_read_seq_record(
handle,
rec_dict._proxy._flows_per_read,
rec_dict._proxy._flow_chars,
rec_dict._proxy._key_sequence,
rec_dict._proxy._alphabet,
trim=True)
elif format == "uniprot-xml":
self.assertTrue(raw.startswith(b"<entry "))
self.assertTrue(raw.endswith(b"</entry>"))
# Currently the __getitem__ method uses this
# trick too, but we hope to fix that later
raw = """<?xml version='1.0' encoding='UTF-8'?>
<uniprot xmlns="http://uniprot.org/uniprot"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://uniprot.org/uniprot
http://www.uniprot.org/support/docs/uniprot.xsd">
%s
</uniprot>
""" % raw.decode()
handle = StringIO(raw)
rec2 = SeqIO.read(handle, format, alphabet)
else:
rec2 = SeqIO.read(handle, format, alphabet)
self.assertEqual(True, compare_record(rec1, rec2))
rec_dict.close()
del rec_dict
if sqlite3:
def test_duplicates_index_db(self):
"""Index file with duplicate identifiers with Bio.SeqIO.index_db()."""
self.assertRaises(ValueError, SeqIO.index_db, ":memory:",
["Fasta/dups.fasta"], "fasta")
def test_duplicates_index(self):
"""Index file with duplicate identifiers with Bio.SeqIO.index()."""
self.assertRaises(ValueError, SeqIO.index, "Fasta/dups.fasta", "fasta")
def test_duplicates_to_dict(self):
"""Index file with duplicate identifiers with Bio.SeqIO.to_dict()."""
handle = open("Fasta/dups.fasta")
iterator = SeqIO.parse(handle, "fasta")
self.assertRaises(ValueError, SeqIO.to_dict, iterator)
handle.close()
class IndexOrderingSingleFile(unittest.TestCase):
f = "GenBank/NC_000932.faa"
ids = [r.id for r in SeqIO.parse(f, "fasta")]
def test_order_to_dict(self):
"""Check to_dict preserves order in indexed file."""
d = SeqIO.to_dict(SeqIO.parse(self.f, "fasta"))
self.assertEqual(self.ids, list(d))
def test_order_index(self):
"""Check index preserves order in indexed file."""
d = SeqIO.index(self.f, "fasta")
self.assertEqual(self.ids, list(d))
if sqlite3:
def test_order_index_db(self):
"""Check index_db preserves ordering indexed file."""
d = SeqIO.index_db(":memory:", [self.f], "fasta")
self.assertEqual(self.ids, list(d))
if sqlite3:
class IndexOrderingManyFiles(unittest.TestCase):
def test_order_index_db(self):
"""Check index_db preserves order in multiple indexed files."""
files = ["GenBank/NC_000932.faa", "GenBank/NC_005816.faa"]
ids = []
for f in files:
ids.extend(r.id for r in SeqIO.parse(f, "fasta"))
d = SeqIO.index_db(":memory:", files, "fasta")
self.assertEqual(ids, list(d))
tests = [
("Ace/contig1.ace", "ace", generic_dna),
("Ace/consed_sample.ace", "ace", None),
("Ace/seq.cap.ace", "ace", generic_dna),
("Quality/wrapping_original_sanger.fastq", "fastq", None),
("Quality/example.fastq", "fastq", None), # Unix newlines
("Quality/example.fastq", "fastq-sanger", generic_dna),
("Quality/example_dos.fastq", "fastq", None), # DOS/Windows newlines
("Quality/tricky.fastq", "fastq", generic_nucleotide),
("Quality/sanger_faked.fastq", "fastq-sanger", generic_dna),
("Quality/solexa_faked.fastq", "fastq-solexa", generic_dna),
("Quality/illumina_faked.fastq", "fastq-illumina", generic_dna),
("Quality/zero_length.fastq", "fastq", generic_dna),
("EMBL/epo_prt_selection.embl", "embl", None),
("EMBL/U87107.embl", "embl", None),
("EMBL/TRBG361.embl", "embl", None),
("EMBL/kipo_prt_sample.embl", "embl", None),
("EMBL/A04195.imgt", "embl", None), # Not a proper EMBL file, an IMGT file
("EMBL/A04195.imgt", "imgt", None),
("EMBL/hla_3260_sample.imgt", "imgt", None),
("EMBL/patents.embl", "embl", generic_protein),
("EMBL/AAA03323.embl", "embl", None),
("GenBank/NC_000932.faa", "fasta", generic_protein),
("GenBank/NC_005816.faa", "fasta", generic_protein),
("GenBank/NC_005816.tsv", "tab", generic_protein),
("GenBank/NC_005816.ffn", "fasta", generic_dna),
("GenBank/NC_005816.fna", "fasta", generic_dna),
("GenBank/NC_005816.gb", "gb", None),
("GenBank/cor6_6.gb", "genbank", None),
("GenBank/empty_accession.gbk", "gb", None),
("GenBank/empty_version.gbk", "gb", None),
("IntelliGenetics/vpu_nucaligned.txt", "ig", generic_nucleotide),
("IntelliGenetics/TAT_mase_nuc.txt", "ig", None),
("IntelliGenetics/VIF_mase-pro.txt", "ig", generic_protein),
("Phd/phd1", "phd", generic_dna),
("Phd/phd2", "phd", None),
("Phd/phd_solexa", "phd", generic_dna),
("Phd/phd_454", "phd", generic_dna),
("NBRF/B_nuc.pir", "pir", generic_nucleotide),
("NBRF/Cw_prot.pir", "pir", generic_protein),
("NBRF/clustalw.pir", "pir", None),
("SwissProt/sp001", "swiss", None),
("SwissProt/sp010", "swiss", None),
("SwissProt/sp016", "swiss", None),
("SwissProt/multi_ex.txt", "swiss", None),
("SwissProt/multi_ex.xml", "uniprot-xml", None),
("SwissProt/multi_ex.fasta", "fasta", None),
("Roche/E3MFGYR02_random_10_reads.sff", "sff", generic_dna),
("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim", generic_dna),
("Roche/E3MFGYR02_index_at_start.sff", "sff", generic_dna),
("Roche/E3MFGYR02_index_in_middle.sff", "sff", generic_dna),
("Roche/E3MFGYR02_alt_index_at_start.sff", "sff", generic_dna),
("Roche/E3MFGYR02_alt_index_in_middle.sff", "sff", generic_dna),
("Roche/E3MFGYR02_alt_index_at_end.sff", "sff", generic_dna),
("Roche/E3MFGYR02_no_manifest.sff", "sff", generic_dna),
("Roche/greek.sff", "sff", generic_nucleotide),
("Roche/greek.sff", "sff-trim", generic_nucleotide),
("Roche/paired.sff", "sff", None),
("Roche/paired.sff", "sff-trim", None),
]
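# The loop below dynamically attaches one "simple", one "key function" and one
# "get_raw" test method per (filename, format, alphabet) entry above to
# IndexDictTests, so each case shows up as its own unit test.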
for filename1, format, alphabet in tests:
assert format in _FormatToRandomAccess
tasks = [(filename1, None)]
if do_bgzf and os.path.isfile(filename1 + ".bgz"):
tasks.append((filename1 + ".bgz", "bgzf"))
for filename2, comp in tasks:
def funct(fn, fmt, alpha, c):
f = lambda x: x.simple_check(fn, fmt, alpha, c) # noqa: E731
f.__doc__ = "Index %s file %s defaults" % (fmt, fn)
return f
setattr(IndexDictTests, "test_%s_%s_simple"
% (format, filename2.replace("/", "_").replace(".", "_")),
funct(filename2, format, alphabet, comp))
del funct
def funct(fn, fmt, alpha, c):
f = lambda x: x.key_check(fn, fmt, alpha, c) # noqa: E731
f.__doc__ = "Index %s file %s with key function" % (fmt, fn)
return f
setattr(IndexDictTests, "test_%s_%s_keyf"
% (format, filename2.replace("/", "_").replace(".", "_")),
funct(filename2, format, alphabet, comp))
del funct
def funct(fn, fmt, alpha, c):
f = lambda x: x.get_raw_check(fn, fmt, alpha, c) # noqa: E731
f.__doc__ = "Index %s file %s get_raw" % (fmt, fn)
return f
setattr(IndexDictTests, "test_%s_%s_get_raw"
% (format, filename2.replace("/", "_").replace(".", "_")),
funct(filename2, format, alphabet, comp))
del funct
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
extcap_ot.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import argparse
import subprocess
import threading
import logging
import re
from spinel.stream import StreamOpen
from spinel.const import SPINEL
from spinel.codec import WpanApi
from serial.tools.list_ports import comports
from enum import Enum
# Nodeid is required to execute ot-ncp-ftd for its sim radio socket port.
# This is the maximum that works on macOS.
DEFAULT_NODEID = 34
COMMON_BAUDRATE = [460800, 115200, 9600]
class Config(Enum):
CHANNEL = 0
BAUDRATE = 1
TAP = 2
class _StreamCloser:
def __init__(self, stream):
self._stream = stream
def __enter__(self):
return self._stream
def __exit__(self, exc_type, exc_val, exc_tb):
self._stream.close()
def extcap_config(interface, option, extcap_version):
"""List Configuration for the given interface"""
args = []
values = []
args.append(
(Config.CHANNEL.value, '--channel', 'Channel', 'IEEE 802.15.4 channel',
'selector', '{required=true}{default=11}'))
match = re.match(r'^(\d+)(\.\d+)*$', extcap_version)
if match and int(match.group(1)) >= 3:
args.append((Config.TAP.value, '--tap',
'IEEE 802.15.4 TAP (only for Wireshark3.0 and later)',
'IEEE 802.15.4 TAP', 'boolflag', '{default=yes}'))
for arg in args:
print('arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s' %
arg)
values = values + [(Config.CHANNEL.value, '%d' % i, '%d' % i,
'true' if i == 11 else 'false') for i in range(11, 27)]
for value in values:
print('value {arg=%d}{value=%s}{display=%s}{default=%s}' % value)
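# Illustrative example of the extcap configuration lines printed by
# extcap_config() above (derived from the defaults declared in the function,
# shown here only as an approximation of the real output):
#   arg {number=0}{call=--channel}{display=Channel}{tooltip=IEEE 802.15.4 channel}{type=selector}{required=true}{default=11}
#   value {arg=0}{value=11}{display=11}{default=true}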
def extcap_dlts(interface):
"""List DLTs for the given interface"""
print(
'dlt {number=195}{name=IEEE802_15_4_WITHFCS}{display=IEEE 802.15.4 with FCS}'
)
print('dlt {number=283}{name=IEEE802_15_4_TAP}{display=IEEE 802.15.4 TAP}')
def serialopen(interface, log_file):
"""
    Open the serial port to identify an OpenThread sniffer
:param interface: string, eg: '/dev/ttyUSB0 - Zolertia Firefly platform', '/dev/ttyACM1 - nRF52840 OpenThread Device'
"""
sys.stdout = log_file
sys.stderr = log_file
interface = str(interface).split()[0]
baudrate = None
for speed in COMMON_BAUDRATE:
with _StreamCloser(StreamOpen('u', interface, False, baudrate=speed)) as stream, \
WpanApi(stream, nodeid=DEFAULT_NODEID, timeout=0.1) as wpan_api:
# result should not be None for both NCP and RCP
result = wpan_api.prop_get_value(
SPINEL.PROP_CAPS) # confirm OpenThread Sniffer
            # check whether or not this is an OpenThread Sniffer
if result is not None:
baudrate = speed
break
if baudrate is not None:
if sys.platform == 'win32':
            # Wireshark only shows the value of the `display` key ('OpenThread Sniffer').
            # The interface is therefore intentionally appended at the end (e.g. 'OpenThread Sniffer: COM0').
print('interface {value=%s:%s}{display=OpenThread Sniffer %s}' %
(interface, baudrate, interface),
file=sys.__stdout__,
flush=True)
else:
            # On Linux or macOS, Wireshark shows the concatenation of `display`
            # and `interface` by default (e.g. 'OpenThread Sniffer: /dev/ttyACM0').
print('interface {value=%s:%s}{display=OpenThread Sniffer}' %
(interface, baudrate),
file=sys.__stdout__,
flush=True)
def extcap_interfaces():
"""List available interfaces to capture from"""
log_file = open(
os.path.join(tempfile.gettempdir(), 'extcap_ot_interfaces.log'), 'w')
print(
'extcap {version=1.0.0}{display=OpenThread Sniffer}{help=https://github.com/openthread/pyspinel}'
)
threads = []
for interface in comports():
th = threading.Thread(target=serialopen, args=(interface, log_file))
threads.append(th)
th.start()
for th in threads:
th.join()
def extcap_capture(interface, fifo, control_in, control_out, channel, tap):
"""Start the sniffer to capture packets"""
# baudrate = detect_baudrate(interface)
interface_port = str(interface).split(':')[0]
interface_baudrate = str(interface).split(':')[1]
with _StreamCloser(StreamOpen('u', interface_port, False, baudrate=int(interface_baudrate))) as stream, \
WpanApi(stream, nodeid=DEFAULT_NODEID) as wpan_api:
wpan_api.prop_set_value(SPINEL.PROP_PHY_ENABLED, 1)
if sys.platform == 'win32':
python_path = subprocess.Popen(
'py -3 -c "import sys; print(sys.executable)"',
stdout=subprocess.PIPE,
shell=True,
).stdout.readline().decode().strip()
sniffer_py = os.path.join(os.path.dirname(python_path), 'Scripts',
'sniffer.py')
cmd = ['python', sniffer_py]
else:
cmd = ['sniffer.py']
cmd += [
'-c', channel, '-u', interface_port, '--crc', '--rssi', '-b',
interface_baudrate, '-o',
str(fifo), '--is-fifo', '--use-host-timestamp'
]
if tap:
cmd.append('--tap')
subprocess.Popen(cmd).wait()
def extcap_close_fifo(fifo):
""""Close extcap fifo"""
# This is apparently needed to workaround an issue on Windows/macOS
# where the message cannot be read. (really?)
fh = open(fifo, 'wb', 0)
fh.close()
if __name__ == '__main__':
# Capture options
parser = argparse.ArgumentParser(
description='OpenThread Sniffer extcap plugin')
# Extcap Arguments
parser.add_argument('--extcap-interfaces',
help='Provide a list of interfaces to capture from',
action='store_true')
parser.add_argument('--extcap-interface',
help='Provide the interface to capture from')
parser.add_argument('--extcap-dlts',
help='Provide a list of dlts for the given interface',
action='store_true')
parser.add_argument(
'--extcap-config',
help='Provide a list of configurations for the given interface',
action='store_true')
parser.add_argument('--extcap-reload-option',
help='Reload elements for the given option')
parser.add_argument('--capture',
help='Start the capture routine',
action='store_true')
parser.add_argument(
'--fifo',
help='Use together with capture to provide the fifo to dump data to')
parser.add_argument(
'--extcap-capture-filter',
help='Used together with capture to provide a capture filter')
parser.add_argument('--extcap-control-in',
help='Used to get control messages from toolbar')
parser.add_argument('--extcap-control-out',
help='Used to send control messages to toolbar')
parser.add_argument('--extcap-version', help='Wireshark Version')
# Interface Arguments
parser.add_argument('--channel',
help='IEEE 802.15.4 capture channel [11-26]')
parser.add_argument(
'--tap',
help='IEEE 802.15.4 TAP (only for Wireshark3.0 and later)',
action='store_true')
try:
args, unknown = parser.parse_known_args()
except argparse.ArgumentError as e:
parser.exit('ERROR_ARG: %s' % str(e))
extcap_version = ''
version_path = os.path.join(tempfile.gettempdir(), 'extcap_ot_version')
if args.extcap_version:
extcap_version = args.extcap_version
with open(version_path, mode='w') as f:
f.write(extcap_version)
else:
try:
with open(version_path, mode='r') as f:
extcap_version = f.read()
except FileNotFoundError:
pass
if len(unknown) > 0:
parser.exit('Sniffer %d unknown arguments given: %s' %
(len(unknown), unknown))
if len(sys.argv) == 0:
parser.print_help()
parser.exit('No arguments given!')
if not args.extcap_interfaces and args.extcap_interface is None:
parser.exit(
'An interface must be provided or the selection must be displayed')
if args.extcap_interfaces:
extcap_interfaces()
sys.exit(0)
if args.extcap_config:
extcap_config(args.extcap_interface, '', extcap_version)
elif args.extcap_dlts:
extcap_dlts(args.extcap_interface)
elif args.capture:
if args.fifo is None:
parser.exit('The fifo must be provided to capture')
try:
extcap_capture(args.extcap_interface, args.fifo,
args.extcap_control_in, args.extcap_control_out,
args.channel, args.tap)
except KeyboardInterrupt:
pass
except Exception as e:
logging.exception(e)
parser.exit('ERROR_INTERNAL')
else:
parser.print_help()
parser.exit('ERROR_USAGE')
|
flaskwebgui.py
|
__version__ = "0.3.4"
import os
import sys
import time
from datetime import datetime
import logging
import tempfile
import socketserver
import subprocess as sps
from inspect import isfunction
from threading import Lock, Thread
logging.basicConfig(level=logging.INFO, format='flaskwebgui - [%(levelname)s] - %(message)s')
# UTILS
def find_chrome_mac():
chrome_names = ['Google Chrome', 'Chromium']
for chrome_name in chrome_names:
default_dir = r'/Applications/{}.app/Contents/MacOS/{}'.format(chrome_name, chrome_name)
if os.path.exists(default_dir):
return default_dir
        # use the mdfind command to locate Chrome in alternate locations and return the first one
name = '{}.app'.format(chrome_name)
alternate_dirs = [x for x in sps.check_output(["mdfind", name]).decode().split('\n') if x.endswith(name)]
if len(alternate_dirs):
return alternate_dirs[0] + '/Contents/MacOS/{}'.format(chrome_name)
return None
def find_chrome_linux():
try:
import whichcraft as wch
except Exception as e:
raise Exception("whichcraft module is not installed/found \
please fill browser_path parameter or install whichcraft!") from e
chrome_names = ['chromium-browser',
'chromium',
'google-chrome',
'google-chrome-stable']
for name in chrome_names:
chrome = wch.which(name)
if chrome is not None:
return chrome
return None
def find_chrome_win():
    # Use Edge by default since it's built on Chromium.
    edge_path = r"C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe"
if os.path.exists(edge_path):
return edge_path
import winreg as reg
reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
chrome_path = None
last_exception = None
for install_type in reg.HKEY_CURRENT_USER, reg.HKEY_LOCAL_MACHINE:
try:
reg_key = reg.OpenKey(install_type, reg_path, 0, reg.KEY_READ)
chrome_path = reg.QueryValue(reg_key, None)
reg_key.Close()
except WindowsError as e:
last_exception = e
else:
if chrome_path and len(chrome_path) > 0:
break
# Only log some debug info if we failed completely to find chrome
if not chrome_path:
logging.exception(last_exception)
logging.error("Failed to detect chrome location from registry")
else:
logging.info(f"Chrome path detected as: {chrome_path}")
return chrome_path
def get_default_chrome_path():
"""
    Credits for the get_instance_path, find_chrome_mac, find_chrome_linux and
    find_chrome_win functions go to:
    https://github.com/ChrisKnott/Eel/blob/master/eel/chrome.py
"""
if sys.platform in ['win32', 'win64']:
return find_chrome_win()
elif sys.platform in ['darwin']:
return find_chrome_mac()
elif sys.platform.startswith('linux'):
return find_chrome_linux()
# class FlaskwebguiDjangoMiddleware:
#TODO help needed here
# def __init__(self, get_response=None):
# self.get_response = get_response
# def __call__(self, request):
# response = self.get_response(request)
# return response
current_timestamp = None
class FlaskUI:
def __init__(self,
app,
start_server='flask',
width=800,
height=600,
maximized=False,
fullscreen=False,
browser_path=None,
socketio=None,
on_exit=None,
idle_interval=5,
close_server_on_exit=True
) -> None:
self.app = app
        self.start_server = start_server if isfunction(start_server) else str(start_server).lower()
self.width = str(width)
self.height= str(height)
self.fullscreen = fullscreen
self.maximized = maximized
self.browser_path = browser_path if browser_path else get_default_chrome_path()
self.socketio = socketio
self.on_exit = on_exit
self.idle_interval = idle_interval
self.close_server_on_exit = close_server_on_exit
self.set_url()
self.webserver_dispacher = {
"flask": self.start_flask,
"flask-socketio": self.start_flask_socketio,
"django": self.start_django,
"fastapi": self.start_fastapi
}
self.supported_frameworks = list(self.webserver_dispacher.keys())
if self.close_server_on_exit:
self.lock = Lock()
def update_timestamp(self):
self.lock.acquire()
global current_timestamp
current_timestamp = datetime.now()
self.lock.release()
def run(self):
"""
        Starts three threads: one for the web framework server, one for the
        browser GUI, and one that stops the server once it goes idle.
"""
if self.close_server_on_exit:
self.update_timestamp()
t_start_webserver = Thread(target=self.start_webserver)
t_open_chromium = Thread(target=self.open_chromium)
t_stop_webserver = Thread(target=self.stop_webserver)
threads = [t_start_webserver, t_open_chromium, t_stop_webserver]
for t in threads: t.start()
for t in threads: t.join()
def set_url(self):
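        # Bind a throwaway TCPServer to port 0 so the OS picks a free port, then
        # reuse that port for the real web server below. Another process could in
        # principle grab the port in between, but this is the usual trick for
        # finding a free local port.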
with socketserver.TCPServer(("localhost", 0), None) as s:
free_port = s.server_address[1]
self.host = '127.0.0.1'
self.port = free_port
self.localhost = f"http://{self.host}:{self.port}"
    def start_webserver(self):
        if isfunction(self.start_server):
            self.start_server()
            return
        if self.start_server not in self.supported_frameworks:
            raise Exception(f"'start_server'({self.start_server}) not in {','.join(self.supported_frameworks)} and also not a function which starts the webframework")
        self.webserver_dispacher[self.start_server]()
def add_flask_middleware(self):
@self.app.after_request
def keep_alive_after_request(response):
self.keep_server_running()
return response
@self.app.route("/flaskwebgui-keep-server-alive")
def keep_alive_pooling():
self.keep_server_running()
return "ok"
def start_flask(self):
if self.close_server_on_exit:
self.add_flask_middleware()
try:
import waitress
waitress.serve(self.app, host=self.host, port=self.port)
except:
self.app.run(host=self.host, port=self.port)
def start_flask_socketio(self):
if self.close_server_on_exit:
self.add_flask_middleware()
self.socketio.run(self.app, host=self.host, port=self.port, debug=False)
    def start_django(self):
        try:
            import waitress
            waitress.serve(self.app, host=self.host, port=self.port)
        except:
            # os.system does not raise on failure; it returns the exit status,
            # so fall back from python3 (linux/mac) to python (windows) by
            # checking that status instead of catching an exception.
            if os.system(f"python3 manage.py runserver {self.port}") != 0:
                os.system(f"python manage.py runserver {self.port}")
def add_fastapi_middleware(self):
@self.app.middleware("http")
async def keep_alive_after_request(request, call_next):
response = await call_next(request)
self.keep_server_running()
return response
@self.app.route("/flaskwebgui-keep-server-alive")
async def keep_alive_pooling():
self.keep_server_running()
return "ok"
def start_fastapi(self):
import uvicorn
if self.close_server_on_exit:
self.add_fastapi_middleware()
uvicorn.run(self.app, host=self.host, port=self.port, log_level="warning")
def open_chromium(self):
"""
Open the browser selected (by default it looks for chrome)
# https://peter.sh/experiments/chromium-command-line-switches/
"""
logging.info(f"Opening browser at {self.localhost}")
temp_profile_dir = os.path.join(tempfile.gettempdir(), "flaskwebgui")
if self.browser_path:
launch_options = None
if self.fullscreen:
launch_options = ["--start-fullscreen"]
elif self.maximized:
launch_options = ["--start-maximized"]
else:
launch_options = [f"--window-size={self.width},{self.height}"]
options = [
self.browser_path,
f"--user-data-dir={temp_profile_dir}",
"--new-window",
"--no-sandbox",
"--no-first-run",
# "--window-position=0,0"
] + launch_options + [f'--app={self.localhost}']
sps.Popen(options, stdout=sps.PIPE, stderr=sps.PIPE, stdin=sps.PIPE)
else:
import webbrowser
webbrowser.open_new(self.localhost)
def stop_webserver(self):
if self.close_server_on_exit is False: return
#TODO add middleware for Django
if self.start_server == 'django':
logging.info("Middleware not implemented (yet) for Django.")
return
while True:
self.lock.acquire()
global current_timestamp
delta_seconds = (datetime.now() - current_timestamp).total_seconds()
self.lock.release()
if delta_seconds > self.idle_interval:
logging.info("App closed")
break
time.sleep(self.idle_interval)
if isfunction(self.on_exit):
logging.info(f"Executing {self.on_exit.__name__} function...")
self.on_exit()
logging.info("Closing connections...")
os.kill(os.getpid(), 9)
def keep_server_running(self):
self.update_timestamp()
return "Ok"
|
corescrape_thread.py
|
"""
Core Scrape Threading
Thread control for this package.
"""
import signal
from warnings import warn
from queue import Queue
from threading import Thread
from . import corescrape_event
from core import CoreScrape
from core.exceptions import CoreScrapeTimeout
# pylint: disable=invalid-name, too-few-public-methods, multiple-statements
# pylint: disable=bare-except, too-many-arguments, too-many-instance-attributes
def alarm_handler(signum, frame):
"""Handles the alarm."""
raise CoreScrapeTimeout
class CoreScrapeThread(CoreScrape):
"""
Core Scrape Thread.
    Uses multiple threads to request pages and parse their content.
    A valid rotator must be passed so that each request is produced with a new
    proxy, making it less likely to be red-flagged as a bot or scraper by
    internet service providers. The user can pass a parser (a CoreScrape class
    or a custom class with a 'parse' method) to parse the response and avoid the
    need to store the whole page for postprocessing.
    This controller also gives the user the option to set up a timer, in seconds,
    to raise a timeout. The timer is armed in the 'start_threads' method if an
    integer was provided for the 'timeout' parameter in the constructor, and it
    is disarmed in the 'wait_for_threads' method.
Params:
        nthreads: int. Desired number of threads. Once the method 'start_threads' is
            called, the controller will try to split the given input into 'nthreads'
            chunks. If it is not possible to split into 'nthreads' chunks, the
            actual number of threads is available in 'actualnthreads'.
rotator: corescrape.proxy.Rotator (preferably). Uses this rotator to make
requests using different proxies and user agents. There is always the
possibility to pass the 'requests' module to this parameter, but that is
not advised as the control of proxies and user-agents is not automatic.
        parser: corescrape.pgparser.SimpleParser (or a class based on it), or None.
            Used to parse the page content and extract the useful information,
            making the process less memory-intensive. If no argument is given, the
            thread controller will return a list of the full pages collected.
timeout: int or None. Time in seconds to configure the timeout process.
Set up a timer to raise an event and stop the threads once the time is
reached.
logoperator: corescrape.logs.LogOperator or None. Log to be fed with process
runtime information.
"""
def __init__(self, nthreads, rotator, parser=None, timeout=None,
logoperator=None):
"""Constructor."""
if timeout is not None and not isinstance(timeout, int):
raise TypeError("Param. 'timeout' must be 'int' or 'NoneType'")
# inputs
self.nthreads = nthreads
self.actualnthreads = nthreads
self.rotator = rotator
self.parser = parser
self.timeout = timeout # CAREFUL! This is not timeout for requests
self.timeoutset = False
# control attrs
self.queue = Queue()
self.event = corescrape_event.CoreScrapeEvent(logoperator=logoperator)
self.threads = []
super().__init__(logoperator=logoperator)
def __split(self, a):
"""
Tries to split the input into chunks for each thread.
Input must be a list.
"""
if not isinstance(a, list):
raise TypeError("Param 'a' must be 'list'")
n = self.nthreads # desired number of threads
k, m = divmod(len(a), n)
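        # e.g. 10 items over 3 threads -> k=3, m=1 -> chunk sizes of 4, 3 and 3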
split = [a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]
split = [part for part in split if part] # drops empty chunks
# actual number of threads. Sometimes differs from 'nthreads'
self.actualnthreads = len(split)
return split
def __warn_wait_threads(self):
"""Produce warning to wait for threads if needed."""
if self.threads:
warn(
'There are threads running. Wait for them to stop before calling '
'this method'
)
return True
return False
def __set_timeout(self):
"""
        If a timeout in seconds was given to the constructor, set an alarm for it.
        Once the timeout is reached, the iteration is interrupted and the threads
        return as expected.
"""
if self.timeout:
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(self.timeout)
self.log('CoreScrapeThread set the timeout for {} seconds.'.format(
self.timeout), tmsg='info')
self.timeoutset = True
def __disarm_timeout(self):
"""Turn off the timeout."""
if self.timeoutset:
self.timeoutset = False
signal.alarm(0)
self.log('CoreScrapeThread disarmed the timeout.', tmsg='info')
def __check_am_i_the_last(self):
"""Check if this thread is the last and if it should set an event."""
condition = self.queue.qsize() + 1 >= self.actualnthreads
condition = condition and self.event.state.is_EXECUTING()
if condition:
self.event.state.set_DUTY_FREE()
def __iterate(self, threadid, data, *args):
"""Do iterations in threads, each one calling the passed code."""
# pylint: disable=unused-argument
self.log('Starting iteration in threadid {} for {} items'.format(
threadid, len(data)))
res = []
for url in data:
# the reason here does not matter. If it is set, break out
if self.event.is_set(): break
try:
page = self.rotator.request(url, self.event, threadid=threadid)
except:
self.event.state.set_ABORT_THREAD()
break
if page is None: continue # not able to retrieve the page
if self.parser is None:
res.append(page)
self.log('Storing whole response for {}. Thread {}'.format(
url, threadid))
elif page.status_code == 404:
self.log('URL {} returned a 404. Thread {}'.format(url, threadid),
tmsg='warning')
res.append({url: None}) # points it was collected but useless
else:
_res = self.parser.parse(page, threadid=threadid)
if not _res:
self.log('URL {} could not be parsed. Thread {}'.format(
url, threadid))
continue # no info collected, must go on
self.log('URL {} collected. Thread {}'.format(url, threadid),
tmsg='header')
res.append({url: _res})
self.__check_am_i_the_last()
return res
def start_threads(self, to_split_params, *fixed_args):
"""Starts threads."""
def test_if_urls(p):
return [a.startswith('http://') or a.startswith('https://') for a in p]
# pylint: disable=no-value-for-parameter
abort = self.__warn_wait_threads()
if abort:
return False
if not all(test_if_urls(to_split_params)):
raise ValueError('List of strings must begin with protocol')
self.log('Starting threads for {} items'.format(len(to_split_params)))
self.threads = []
self.event.state.set_EXECUTING()
for threadid, split in enumerate(self.__split(to_split_params)):
pargs = (threadid, split, *fixed_args)
thread = Thread(
target=lambda q, *args: q.put(self.__iterate(*args)),
args=(self.queue, *pargs)
)
thread.start()
self.threads.append(thread)
self.__set_timeout()
return True
def wait_for_threads(self):
"""Wait lock for threads."""
try:
self.event.wait()
except KeyboardInterrupt:
self.event.state.set_ABORT_USER()
except CoreScrapeTimeout:
self.event.state.set_TIMEOUT()
finally:
self.__disarm_timeout()
for thread in self.threads:
thread.join()
self.event.clear()
self.threads = []
def join_responses(self):
"""Join responses from the threads."""
abort = self.__warn_wait_threads()
if abort:
return []
res = []
while not self.queue.empty():
res += self.queue.get()
return res
def is_sentenced(self):
"""
Informs if the thread controller is sentenced due to the last event state.
"""
sentenced = self.event.state.is_sentenced()
if sentenced:
self.event.state.set_FINISHED()
return sentenced
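# Illustrative usage sketch (the rotator, parser and URL list below are
# assumptions, not part of this module): drive the controller through its
# typical start -> wait -> collect cycle.
#
#   controller = CoreScrapeThread(nthreads=4, rotator=my_rotator,
#                                 parser=my_parser, timeout=300)
#   if controller.start_threads(["https://example.com/page1",
#                                "https://example.com/page2"]):
#       controller.wait_for_threads()
#       results = controller.join_responses()
#       if controller.is_sentenced():
#           pass  # aborted by user, timeout or a thread error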
|
threading chat client.py
|
import threading
import socket
from urllib import request
INITIAL_PORT = 80
ENCODING = 'utf-8'
def send(sock: socket.socket):
while True:
msg = input("send << ")
encode = msg.encode(ENCODING)
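        # Frame the message: a 32-byte big-endian length prefix followed by the
        # encoded payload, so the receiving end can tell where each message ends.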
sock.send(len(encode).to_bytes(32, 'big') + encode)
def recv(sock: socket.socket):
while True:
msg = sock.recv(1024)
print(f'\nrecv >> {msg.decode(ENCODING)}')
def client(ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
print(f"Connected to Server")
sender = threading.Thread(target=send, args=(sock,))
receiver = threading.Thread(target=recv, args=(sock,))
sender.start()
receiver.start()
sender.join()
receiver.join()
if __name__ == '__main__':
raw = input("Address: ")
ip_, port_ = raw.split(':')
client(ip_, int(port_))
|
MdnsListener.py
|
# Copyright (C) 2018 Riedel Communications GmbH & Co. KG
#
# Modifications Copyright 2018 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from queue import Queue
class MdnsListener(object):
def __init__(self, zeroconf):
self.zeroconf = zeroconf
self.services = list()
self.resolve_queue = Queue()
def add_service(self, zeroconf, srv_type, name):
self.resolve_queue.put((srv_type, name))
t = Thread(target=self.worker)
t.daemon = True
t.start()
def get_service_list(self):
self.resolve_queue.join()
return self.services
def worker(self):
item = self.resolve_queue.get()
info = self.zeroconf.get_service_info(item[0], item[1])
if info is not None:
self.services.append(info)
self.resolve_queue.task_done()
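# Illustrative usage sketch (the service type below is an assumption): this
# listener is meant to be handed to a zeroconf ServiceBrowser, which calls
# add_service() as services are discovered.
#
#   from zeroconf import Zeroconf, ServiceBrowser
#   zeroconf = Zeroconf()
#   listener = MdnsListener(zeroconf)
#   browser = ServiceBrowser(zeroconf, "_nmos-node._tcp.local.", listener)
#   services = listener.get_service_list()  # blocks until pending lookups finish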
|
base_cli_srv.py
|
#!/usr/bin/python3
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`base_cli_srv` --- Base classes for single packet end2end testing
======================================================================
"""
# Stdlib
import argparse
import copy
import logging
import os
import random
import socket
import struct
import sys
import threading
import time
from abc import ABCMeta, abstractmethod
from itertools import product
# SCION
from endhost.sciond import SCIOND_API_SOCKDIR, SCIONDaemon
from lib.defines import AS_LIST_FILE, GEN_PATH
from lib.log import init_logging
from lib.main import main_wrapper
from lib.packet.host_addr import (
haddr_get_type,
haddr_parse_interface,
)
from lib.packet.packet_base import PayloadRaw
from lib.packet.path import SCIONPath
from lib.packet.scion import SCIONL4Packet, build_base_hdrs
from lib.packet.scion_addr import ISD_AS, SCIONAddr
from lib.packet.scion_udp import SCIONUDPHeader
from lib.socket import ReliableSocket
from lib.thread import kill_self, thread_safety_net
from lib.util import (
Raw,
handle_signals,
load_yaml_file,
)
API_TOUT = 15
class ResponseRV:
FAILURE = 0
SUCCESS = 1
RETRY = 2
class TestBase(object, metaclass=ABCMeta):
def __init__(self, sd, data, finished, addr, timeout=1.0):
self.sd = sd
self.data = data
self.finished = finished
self.addr = addr
self._timeout = timeout
self.sock = self._create_socket(addr)
assert self.sock
self.success = None
@abstractmethod
def run(self):
raise NotImplementedError
def _create_socket(self, addr):
sock = ReliableSocket(reg=(addr, 0, True, None))
sock.settimeout(self._timeout)
return sock
def _recv(self):
try:
packet = self.sock.recv()[0]
except socket.timeout:
return None
return SCIONL4Packet(packet)
def _send_pkt(self, spkt, next_=None):
next_hop, port = next_ or self.sd.get_first_hop(spkt)
assert next_hop is not None
logging.debug("Sending (via %s:%s):\n%s", next_hop, port, spkt)
self.sock.send(spkt.pack(), (next_hop, port))
def _shutdown(self):
self.sock.close()
class TestClientBase(TestBase):
"""
Base client app
"""
def __init__(self, sd, data, finished, addr, dst, dport, api=True,
timeout=3.0, retries=0):
self.dst = dst
self.dport = dport
self.api = api
self.path = None
self.iflist = []
self.retries = retries
super().__init__(sd, data, finished, addr, timeout)
self._get_path(api)
def _get_path(self, api):
if api:
self._get_path_via_api()
else:
self._get_path_direct()
assert self.path.mtu
def _get_path_via_api(self):
"""
Test local API.
"""
data = self._try_sciond_api()
path_len = data.pop(1) * 8
self.path = SCIONPath(data.pop(path_len))
haddr_type = haddr_get_type(data.pop(1))
data.pop(haddr_type.LEN) # first hop, unused here
data.pop(2) # port number, unused here
self.path.mtu = struct.unpack("!H", data.pop(2))[0]
ifcount = data.pop(1)
self.iflist = []
for i in range(ifcount):
isd_as = ISD_AS(data.pop(ISD_AS.LEN))
ifid = struct.unpack("!H", data.pop(2))[0]
self.iflist.append((isd_as, ifid))
def _try_sciond_api(self):
sock = ReliableSocket()
msg = b'\x00' + self.dst.isd_as.pack()
start = time.time()
try:
sock.connect(self.sd.api_addr)
except OSError as e:
logging.critical("Error connecting to sciond: %s", e)
kill_self()
while time.time() - start < API_TOUT:
logging.debug("Sending path request to local API at %s",
self.sd.api_addr)
sock.send(msg)
data = Raw(sock.recv()[0], "Path response")
if data:
sock.close()
return data
logging.debug("Empty response from local api.")
logging.critical("Unable to get path from local api.")
sock.close()
kill_self()
def _get_path_direct(self, flags=0):
logging.debug("Sending PATH request for %s", self.dst)
        # Get paths directly from sciond (not via the local API).
paths = []
for _ in range(5):
paths = self.sd.get_paths(self.dst.isd_as, flags=flags)
if paths:
break
else:
logging.critical("Unable to get path directly from sciond")
kill_self()
self.path = paths[0]
self._get_iflist()
def _get_iflist(self):
self.iflist = self.path.interfaces
def run(self):
while not self.finished.is_set():
self._send()
start = time.time()
spkt = self._recv()
recv_dur = time.time() - start
if not spkt:
logging.info("Timeout waiting for response")
self._retry_or_stop()
continue
r_code = self._handle_response(spkt)
if r_code in [ResponseRV.FAILURE, ResponseRV.SUCCESS]:
self._stop(success=bool(r_code))
else:
# Rate limit retries to 1 request per second.
self._retry_or_stop(1.0 - recv_dur)
self._shutdown()
def _retry_or_stop(self, delay=0.0):
if delay < 0:
delay = 0
if self.retries:
self.retries -= 1
logging.info("Retrying in %.1f s... (%d retries remaining)." %
(delay, self.retries))
time.sleep(delay)
self._get_path(self.api)
else:
self._stop()
def _stop(self, success=False):
self.success = success
self.finished.set()
def _send(self):
self._send_pkt(self._build_pkt())
if self.iflist:
logging.debug("Interfaces: %s", ", ".join(
["%s:%s" % ifentry for ifentry in self.iflist]))
def _build_pkt(self, path=None):
cmn_hdr, addr_hdr = build_base_hdrs(self.addr, self.dst)
l4_hdr = self._create_l4_hdr()
extensions = self._create_extensions()
if path is None:
path = self.path
spkt = SCIONL4Packet.from_values(
cmn_hdr, addr_hdr, path, extensions, l4_hdr)
spkt.set_payload(self._create_payload(spkt))
spkt.update()
return spkt
def _get_first_hop(self, spkt):
return self.sd.get_first_hop(spkt)
def _create_payload(self, spkt):
return PayloadRaw(self.data)
def _create_l4_hdr(self):
return SCIONUDPHeader.from_values(
self.addr, self.sock.port, self.dst, self.dport)
def _create_extensions(self):
return []
@abstractmethod
def _handle_response(self, spkt):
raise NotImplementedError
class TestServerBase(TestBase):
"""
Base server app
"""
def run(self):
while not self.finished.is_set():
spkt = self._recv()
if spkt and not self._handle_request(spkt):
self.success = False
self.finished.set()
self._shutdown()
@abstractmethod
def _handle_request(self, spkt):
raise NotImplementedError
class TestClientServerBase(object):
"""
Test module to run client and server
"""
NAME = ""
def __init__(self, client, server, sources, destinations, local=True,
max_runs=None, retries=0):
assert self.NAME
t = threading.current_thread()
t.name = self.NAME
self.client_ip = haddr_parse_interface(client)
self.server_ip = haddr_parse_interface(server)
self.src_ias = sources
self.dst_ias = destinations
self.local = local
self.scionds = {}
self.max_runs = max_runs
self.retries = retries
def run(self):
try:
self._run()
finally:
self._stop_scionds()
logging.info("All tests successful")
def _run(self):
"""
Run a test for every pair of src and dst
"""
# Generate all possible pairs, and randomise the order.
pairs = list(product(self.src_ias, self.dst_ias))
random.shuffle(pairs)
count = 0
for src_ia, dst_ia in pairs:
if not self.local and src_ia == dst_ia:
continue
count += 1
if self.max_runs and count > self.max_runs:
logging.debug("Hit max runs (%d), stopping", self.max_runs)
break
src = SCIONAddr.from_values(src_ia, self.client_ip)
dst = SCIONAddr.from_values(dst_ia, self.server_ip)
t = threading.current_thread()
t.name = "%s %s > %s main" % (self.NAME, src_ia, dst_ia)
if not self._run_test(src, dst):
sys.exit(1)
def _run_test(self, src, dst):
"""
Run client and server, wait for both to finish
"""
logging.info("Testing: %s -> %s", src.isd_as, dst.isd_as)
# finished is used by the client/server to signal to the other that they
# are stopping.
finished = threading.Event()
data = self._create_data(src, dst)
server = self._create_server(data, finished, dst)
client = self._create_client(data, finished, src, dst, server.sock.port)
server_name = "%s %s > %s server" % (self.NAME, src.isd_as, dst.isd_as)
s_thread = threading.Thread(
target=thread_safety_net, args=(server.run,), name=server_name,
daemon=True)
s_thread.start()
client.run()
# If client is finished, server should finish within ~1s (due to recv
# timeout). If it hasn't, then there was a problem.
s_thread.join(5.0)
if s_thread.is_alive():
logging.error("Timeout waiting for server thread to terminate")
return False
return self._check_result(client, server)
def _check_result(self, client, server):
if client.success and server.success:
logging.debug("Success")
return True
logging.error("Client success? %s Server success? %s",
client.success, server.success)
return False
def _create_data(self, src, dst):
return ("%s <-> %s" % (src.isd_as, dst.isd_as)).encode("UTF-8")
def _create_server(self, data, finished, addr):
"""
Instantiate server app
"""
return TestServerBase(self._run_sciond(addr), data, finished, addr)
def _create_client(self, data, finished, src, dst, port):
"""
Instantiate client app
"""
return TestClientBase(self._run_sciond(src), data, finished, src, dst,
port, retries=self.retries)
def _run_sciond(self, addr):
if addr.isd_as not in self.scionds:
logging.debug("Starting sciond for %s", addr.isd_as)
# Local api on, random port, random api port
self.scionds[addr.isd_as] = start_sciond(
addr, api=True, api_addr=SCIOND_API_SOCKDIR + "%s_%s.sock" %
(self.NAME, addr.isd_as))
return self.scionds[addr.isd_as]
def _stop_scionds(self):
for sd in self.scionds.values():
sd.stop()
def start_sciond(addr, api=False, port=0, api_addr=None):
conf_dir = "%s/ISD%d/AS%d/endhost" % (
GEN_PATH, addr.isd_as[0], addr.isd_as[1])
return SCIONDaemon.start(
conf_dir, addr.host, api_addr=api_addr, run_local_api=api, port=port)
def _load_as_list():
as_dict = load_yaml_file(os.path.join(GEN_PATH, AS_LIST_FILE))
as_list = []
for as_str in as_dict.get("Non-core", []) + as_dict.get("Core", []):
as_list.append(ISD_AS(as_str))
return as_list
def _parse_locs(as_str, as_list):
if as_str:
return [ISD_AS(as_str)]
copied = copy.copy(as_list)
random.shuffle(copied)
return copied
def setup_main(name, parser=None):
handle_signals()
parser = parser or argparse.ArgumentParser()
parser.add_argument('-l', '--loglevel', default="INFO",
help='Console logging level (Default: %(default)s)')
parser.add_argument('-c', '--client', help='Client address')
parser.add_argument('-s', '--server', help='Server address')
parser.add_argument('-m', '--mininet', action='store_true',
help="Running under mininet")
parser.add_argument("-r", "--runs", type=int,
help="Limit the number of pairs tested")
parser.add_argument("-w", "--wait", type=float, default=0.0,
help="Time in seconds to wait before running")
parser.add_argument("--retries", type=int, default=0,
help="Number of retries before giving up.")
parser.add_argument('src_ia', nargs='?', help='Src isd-as')
parser.add_argument('dst_ia', nargs='?', help='Dst isd-as')
args = parser.parse_args()
init_logging("logs/%s" % name, console_level=args.loglevel)
if not args.client:
args.client = "169.254.0.2" if args.mininet else "127.0.0.2"
if not args.server:
args.server = "169.254.0.3" if args.mininet else "127.0.0.3"
as_list = _load_as_list()
srcs = _parse_locs(args.src_ia, as_list)
dsts = _parse_locs(args.dst_ia, as_list)
return args, srcs, dsts
def main():
args, srcs, dsts = setup_main("base")
TestClientServerBase(args.client, args.server, srcs, dsts).run()
if __name__ == "__main__":
main_wrapper(main)
|
release.py
|
#!/usr/bin/python
import re
import sys
import os
import os.path
import subprocess
import shutil
import tempfile
from datetime import *
from multiprocessing import Process
from utils import *
from xml.etree.ElementTree import *
# TreeBuilder that preserves comments
class CommentedTreeBuilder(TreeBuilder):
def __init__(self, *args, **kwargs):
TreeBuilder.__init__(self, *args, **kwargs)
def comment(self, data):
self.start(Comment, {})
self.data(data)
self.end(Comment)
modules = []
uploader = None
git = None
def get_modules(directory):
'''Analyses the pom.xml file and extracts declared modules'''
tree = ElementTree()
f = directory + "/pom.xml"
if settings['verbose']:
print "Parsing %s to get a list of modules in project" % f
parser = XMLParser(target=CommentedTreeBuilder())
tree.parse(f, parser=parser)
mods = tree.findall(".//{%s}module" % maven_pom_xml_namespace)
for m in mods:
modules.append(m.text)
def help_and_exit():
prettyprint('''
Welcome to the Infinispan Release Script.
%s Usage:%s
$ bin/release.py <version> <branch to tag from> <--mvn-only>
%s E.g.,%s
$ bin/release.py 6.1.1.Beta1 %s<-- this will tag off master.%s
$ bin/release.py 6.1.1.Beta1 6.1.x %s<-- this will use the appropriate branch.%s
$ bin/release.py 6.1.1.Beta1 6.1.x --mvn-only %s<-- this will only tag and release to maven (no distribution).%s
''' % (Colors.yellow(), Colors.end_color(), Colors.yellow(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color(), Colors.green(), Colors.end_color()), Levels.INFO)
sys.exit(0)
def validate_version(version):
version_pattern = get_version_pattern()
if version_pattern.match(version):
return version.strip()
else:
prettyprint("Invalid version '"+version+"'!\n", Levels.FATAL)
help_and_exit()
def tag_release(version, branch):
if git.remote_branch_exists():
git.switch_to_branch()
git.create_tag_branch()
else:
prettyprint("Branch %s cannot be found on upstream repository. Aborting!" % branch, Levels.FATAL)
sys.exit(100)
def get_project_version_tag(tree):
return tree.find("./{%s}version" % (maven_pom_xml_namespace))
def get_parent_version_tag(tree):
return tree.find("./{%s}parent/{%s}version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def get_properties_version_tag(tree):
return tree.find("./{%s}properties/{%s}project-version" % (maven_pom_xml_namespace, maven_pom_xml_namespace))
def write_pom(tree, pom_file):
tree.write("tmp.xml", 'UTF-8')
in_f = open("tmp.xml")
out_f = open(pom_file, "w")
try:
for l in in_f:
newstr = l.replace("ns0:", "").replace(":ns0", "").replace("ns1", "xsi")
out_f.write(newstr)
finally:
in_f.close()
out_f.close()
os.remove("tmp.xml")
if settings['verbose']:
prettyprint(" ... updated %s" % pom_file, Levels.INFO)
def patch(pom_file, version):
'''Updates the version in a POM file. We need to locate //project/parent/version, //project/version and
//project/properties/project-version and replace the contents of these with the new version'''
if settings['verbose']:
prettyprint("Patching %s" % pom_file, Levels.DEBUG)
tree = ElementTree()
parser = XMLParser(target=CommentedTreeBuilder())
tree.parse(pom_file, parser=parser)
need_to_write = False
tags = []
tags.append(get_parent_version_tag(tree))
tags.append(get_project_version_tag(tree))
tags.append(get_properties_version_tag(tree))
for tag in tags:
if tag != None and "-SNAPSHOT" in tag.text:
if settings['verbose']:
prettyprint("%s is %s. Setting to %s" % (str(tag), tag.text, version), Levels.DEBUG)
tag.text=version
need_to_write = True
if need_to_write:
# write to file again!
write_pom(tree, pom_file)
return True
else:
if settings['verbose']:
prettyprint("File doesn't need updating; nothing replaced!", Levels.DEBUG)
return False
def get_poms_to_patch(working_dir):
get_modules(working_dir)
if settings['verbose']:
prettyprint('Available modules are ' + str(modules), Levels.DEBUG)
poms_to_patch = [working_dir + "/pom.xml"]
for m in modules:
poms_to_patch.append(working_dir + "/" + m + "/pom.xml")
# Look for additional POMs that are not directly referenced!
for additionalPom in GlobDirectoryWalker(working_dir, 'pom.xml'):
if additionalPom not in poms_to_patch:
poms_to_patch.append(additionalPom)
return poms_to_patch
def update_versions(base_dir, version):
os.chdir(base_dir)
poms_to_patch = get_poms_to_patch(".")
modified_files = []
for pom in poms_to_patch:
if patch(pom, version):
modified_files.append(pom)
pieces = re.compile('[\.\-]').split(version)
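  # e.g. "6.1.1.Final" splits into ['6', '1', '1', 'Final']; for a Final
  # release the next development version computed below is "6.1.2-SNAPSHOT".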
snapshot = pieces[3]=='SNAPSHOT'
final = pieces[3]=='Final'
# Now make sure this goes back into the repository.
git.commit(modified_files, "'Release Script: update versions for %s'" % version)
# And return the next version
if final:
return pieces[0] + '.' + pieces[1] + '.' + str(int(pieces[2])+ 1) + '-SNAPSHOT'
else:
return None
def get_module_name(pom_file):
tree = ElementTree()
parser = XMLParser(target=CommentedTreeBuilder())
tree.parse(pom_file, parser=parser)
return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
def upload_artifacts(dist_dir, version):
"""Artifacts gets rsync'ed to filemgmt.jboss.org, in the downloads_htdocs/infinispan directory"""
tempdir = tempfile.mkdtemp(prefix = '.tmp', dir='.')
os.mkdir("%s/%s" % (tempdir,version))
prettyprint("Copying from %s to %s" % (dist_dir, version), Levels.INFO)
for item in os.listdir(dist_dir):
full_name = "%s/%s" % (dist_dir, item)
if item.strip().lower().endswith(".zip") and os.path.isfile(full_name):
shutil.copy2(full_name, "%s/%s" % (tempdir,version))
uploader.upload_rsync("%s/%s" % (tempdir,version), "infinispan@filemgmt.jboss.org:/downloads_htdocs/infinispan")
shutil.rmtree(tempdir, ignore_errors = True)
def unzip_archive(version):
os.chdir("./distribution/target/distribution")
## Grab the distribution archive and un-arch it
shutil.rmtree("infinispan-%s-all" % version, ignore_errors = True)
if settings['verbose']:
subprocess.check_call(["unzip", "infinispan-%s-all.zip" % version])
else:
subprocess.check_call(["unzip", "-q", "infinispan-%s-all.zip" % version])
def prepare_docs(base_dir, version):
os.chdir("%s/distribution/target/distribution/infinispan-%s-all/docs" % (base_dir, version))
## "Fix" the docs to use the appropriate analytics tracker ID
subprocess.check_call(["%s/bin/updateTracker.sh" % base_dir])
os.mkdir("pdf")
subprocess.check_call(["mvn", "org.apache.maven.plugins:maven-dependency-plugin:2.10:unpack", "-DoutputDirectory=pdf", "-DrepoUrl=https://repository.jboss.org/nexus/content/groups/public-jboss/", "-Dartifact=org.infinispan:infinispan-docs:%s:zip:pdf" % (version)])
def upload_docs(base_dir, version):
"""Javadocs and PDFs get rsync'ed to filemgmt.jboss.org, in the docs_htdocs/infinispan directory"""
version_short = get_version_major_minor(version)
os.mkdir(version_short)
os.rename("api", "%s/apidocs" % version_short)
os.rename("pdf", "%s/pdf" % version_short)
## rsync this stuff to filemgmt.jboss.org
uploader.upload_rsync(version_short, "infinispan@filemgmt.jboss.org:/docs_htdocs/infinispan")
os.chdir(base_dir)
def upload_schema(base_dir, version):
"""Schema gets rsync'ed to filemgmt.jboss.org, in the docs_htdocs/infinispan/schemas and schema_htdoc/infinispan directories"""
os.chdir("%s/distribution/target/distribution/infinispan-%s-all/schema" % (base_dir, version))
  ## rsync this stuff to filemgmt.jboss.org, we put it in the original location (docs/infinispan/schemas) and the new location (schema/infinispan)
uploader.upload_rsync('.', "infinispan@filemgmt.jboss.org:/docs_htdocs/infinispan/schemas")
uploader.upload_rsync('.', "infinispan@filemgmt.jboss.org:/schema_htdocs/infinispan/")
## now the schema docs
version_short = get_version_major_minor(version)
os.chdir("%s/distribution/target/site" % base_dir)
os.mkdir(version_short)
os.rename("configdocs", "%s/configdocs" % version_short)
uploader.upload_rsync(version_short, "infinispan@filemgmt.jboss.org:/docs_htdocs/infinispan")
os.chdir(base_dir)
def do_task(target, args, async_processes):
if settings['multi_threaded']:
async_processes.append(Process(target = target, args = args))
else:
target(*args)
### This is the starting place for this script.
def release():
global settings
global uploader
global git
assert_python_minimum_version(2, 5)
require_settings_file()
# We start by determining whether the version passed in is a valid one
if len(sys.argv) < 2:
help_and_exit()
base_dir = os.getcwd()
version = validate_version(sys.argv[1])
branch = "master"
mvn_only = False
if len(sys.argv) > 2:
if sys.argv[2].startswith("--mvn-only"):
mvn_only = True
else:
branch = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3].startswith("--mvn-only"):
mvn_only = True
else:
prettyprint("Unknown argument %s" % sys.argv[3], Levels.WARNING)
help_and_exit()
prettyprint("Releasing Infinispan version %s from branch '%s'" % (version, branch), Levels.INFO)
sure = input_with_default("Are you sure you want to continue?", "N")
if not sure.upper().startswith("Y"):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
prettyprint("OK, releasing! Please stand by ...", Levels.INFO)
## Set up network interactive tools
if settings['dry_run']:
# Use stubs
prettyprint("*** This is a DRY RUN. No changes will be committed. Used to test this release script only. ***", Levels.DEBUG)
prettyprint("Your settings are %s" % settings, Levels.DEBUG)
uploader = DryRunUploader()
else:
uploader = Uploader()
git = Git(branch, version)
if not git.is_upstream_clone():
proceed = input_with_default('This is not a clone of an %supstream%s Infinispan repository! Are you sure you want to proceed?' % (Colors.UNDERLINE, Colors.END), 'N')
if not proceed.upper().startswith('Y'):
prettyprint("... User Abort!", Levels.WARNING)
sys.exit(1)
## Make sure we don't include un-needed content in the release
prettyprint("Step 1: Cleaning up working directory (un-tracked and modified files)", Levels.INFO)
git.clean_release_directory()
prettyprint("Step 1: Complete", Levels.INFO)
## Release order:
# Step 1: Tag in Git
prettyprint("Step 2: Tagging %s in git as %s" % (branch, version), Levels.INFO)
tag_release(version, branch)
prettyprint("Step 2: Complete", Levels.INFO)
# Step 2: Update version in tagged files
prettyprint("Step 3: Updating version number in source files", Levels.INFO)
version_next = update_versions(base_dir, version)
prettyprint("Step 3: Complete", Levels.INFO)
# Step 3: Build and test
prettyprint("Step 4: Build and test", Levels.INFO)
maven_build_distribution(version)
prettyprint("Step 4: Complete", Levels.INFO)
if not mvn_only:
async_processes = []
##Unzip the newly built archive now
unzip_archive(version)
# Step 4: Update javadoc Google Analytics tracker
prettyprint("Step 5: Prepare docs", Levels.INFO)
prepare_docs(base_dir, version)
prettyprint("Step 5: Complete", Levels.INFO)
# Step 5: Upload docs to FTP
prettyprint("Step 6: Uploading docs", Levels.INFO)
do_task(upload_docs, [base_dir, version], async_processes)
prettyprint("Step 6: Complete", Levels.INFO)
prettyprint("Step 7: Uploading Artifacts", Levels.INFO)
do_task(upload_artifacts, ["%s/distribution/target/distribution" % base_dir, version], async_processes)
do_task(upload_artifacts, ["%s/wildfly-modules/target/distribution" % base_dir, version], async_processes)
do_task(upload_artifacts, ["%s/server/integration/target/distribution" % base_dir, version], async_processes)
prettyprint("Step 7: Complete", Levels.INFO)
prettyprint("Step 8: Uploading to configuration XML schema", Levels.INFO)
do_task(upload_schema, [base_dir, version], async_processes)
prettyprint("Step 8: Complete", Levels.INFO)
## Wait for processes to finish
for p in async_processes:
p.start()
for p in async_processes:
p.join()
## Tag the release
git.tag_for_release()
step_no=9
if mvn_only:
step_no=5
# Switch back to the branch being released
git.switch_to_branch()
# Update to next version
if version_next is not None:
prettyprint("Step %s: Updating version number for next release" % step_no, Levels.INFO)
update_versions(base_dir, version_next)
prettyprint("Step %s: Complete" % step_no, Levels.INFO)
if not settings['dry_run']:
git.push_tag_to_origin()
if version_next is not None:
git.push_branch_to_origin()
git.cleanup()
else:
prettyprint("In dry-run mode. Not pushing tag to remote origin and not removing temp release branch %s." % git.working_branch, Levels.DEBUG)
prettyprint("\n\n\nDone! Now all you need to do is the remaining post-release tasks as outlined in https://mojo.redhat.com/docs/DOC-60994", Levels.INFO)
if __name__ == "__main__":
release()
|
find_primes_mult_old.py
|
#!/usr/bin/python3
import sys, os, subprocess, re, time, itertools, csv
from multiprocessing import Process, Manager, Pool
outfile = open('output.csv', 'w')
wr = csv.writer(outfile)
def worker(in_queue, out_list):
while True:
        line = in_queue.get()
        if line[1] is None:
            # Sentinel from the producer: no more work for this worker.
            break
        row = line[1].split(',')
        server = row[0]
        servername = row[1].strip('\n')
try:
cmd = subprocess.check_output([os.path.dirname(sys.argv[0])+"/openssl-trace",
"s_client", "-trace",
"-cipher", "DHE",
"-connect", server+":443"],
stdin=subprocess.DEVNULL, stderr=subprocess.DEVNULL, timeout=1)
for line in cmd.decode("ISO-8859-1").splitlines():
if 'dh_p' in line:
prime = int(re.sub(".*: ", "", line), 16)
out_list.append(['{} {} {}'.format(server, servername, prime)])
except subprocess.CalledProcessError:
out_list.append(['{} {} {}'.format(server, servername, "No_DHE")])
except subprocess.TimeoutExpired:
out_list.append(['{} {} {}'.format(server, servername, "Can't_connect")])
except:
out_list.append(['{} {} {}'.format(server, servername, "Error")])
# fake work
time.sleep(1)
if __name__ == "__main__":
num_workers = 100
manager = Manager()
results = manager.list()
work = manager.Queue(num_workers)
# start for workers
pool = []
for i in range(num_workers):
p = Process(target=worker, args=(work, results))
p.start()
pool.append(p)
# produce data
with open("test") as f:
iters = itertools.chain(f, (None,)*num_workers)
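        # One None sentinel per worker is chained onto the input so that every
        # worker eventually receives an end-of-work marker and can exit.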
for line in enumerate(iters):
work.put(line)
for p in pool:
p.join()
wr.writerows(results)
#pool.close()
|
bot.py
|
# coding=utf8
"""
bot.py - Willie IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, http://embolalia.net
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net/
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import time
import imp
import os
import re
import sys
import socket
import threading
from datetime import datetime
from willie import tools
import willie.irc as irc
from willie.db import WillieDB
from willie.tools import (stderr, Nick, PriorityQueue, released,
get_command_regexp, iteritems, itervalues)
import willie.module as module
if sys.version_info.major >= 3:
unicode = str
basestring = str
py3 = True
else:
py3 = False
class Willie(irc.Bot):
NOLIMIT = module.NOLIMIT
def __init__(self, config):
irc.Bot.__init__(self, config.core)
self.config = config
"""The ``Config`` for the current Willie instance."""
self.doc = {}
"""
A dictionary of command names to their docstring and example, if
declared. The first item in a callable's commands list is used as the
key in version *3.2* onward. Prior to *3.2*, the name of the function
as declared in the source code was used.
"""
self.stats = {}
"""
A dictionary which maps a tuple of a function name and where it was
        used to the number of times it was used there.
"""
self.times = {}
"""
        A dictionary mapping lower-cased nicks to dictionaries which map
        function names to the time at which they were last used by that nick.
"""
self.acivity = {}
self.server_capabilities = set()
"""A set containing the IRCv3 capabilities that the server supports.
For servers that do not support IRCv3, this will be an empty set."""
self.enabled_capabilities = set()
"""A set containing the IRCv3 capabilities that the bot has enabled."""
self._cap_reqs = dict()
"""A dictionary of capability requests
Maps the capability name to a list of tuples of the prefix ('-', '=',
or ''), the name of the requesting module, and the function to call if
the request is rejected."""
self.privileges = dict()
"""A dictionary of channels to their users and privilege levels
The value associated with each channel is a dictionary of Nicks to a
bitwise integer value, determined by combining the appropriate constants
from `module`."""
self.db = WillieDB(config)
if self.db.check_table('locales', ['name'], 'name'):
self.settings = self.db.locales
self.db.preferences = self.db.locales
elif self.db.check_table('preferences', ['name'], 'name'):
self.settings = self.db.preferences
elif self.db.type is not None:
self.db.add_table('preferences', ['name'], 'name')
self.settings = self.db.preferences
self.memory = tools.WillieMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
modules. See `WillieMemory <#tools.Willie.WillieMemory>`_
"""
self.scheduler = Willie.JobScheduler(self)
self.scheduler.start()
#Set up block lists
#Default to empty
if not self.config.core.nick_blocks:
self.config.core.nick_blocks = []
        if not self.config.core.host_blocks:
            self.config.core.host_blocks = []
#Add nicks blocked under old scheme, if present
if self.config.core.other_bots:
nicks = self.config.core.get_list('nick_blocks')
bots = self.config.core.get_list('other_bots')
nicks.extend(bots)
self.config.core.nick_blocks = nicks
self.config.core.other_bots = False
self.config.save()
self.setup()
class JobScheduler(threading.Thread):
"""Calls jobs assigned to it in steady intervals.
JobScheduler is a thread that keeps track of Jobs and calls them every
X seconds, where X is a property of the Job. It maintains jobs in a
priority queue, where the next job to be called is always the first
item.
Thread safety is maintained with a mutex that is released during long
operations, so methods add_job and clear_jobs can be safely called from
the main thread.
"""
min_reaction_time = 30.0 # seconds
"""How often should scheduler checks for changes in the job list."""
def __init__(self, bot):
"""Requires bot as argument for logging."""
threading.Thread.__init__(self)
self.bot = bot
self._jobs = PriorityQueue()
            # While PriorityQueue itself is thread-safe, this mutex is needed
            # to stop old jobs from being put into the new queue after the
            # queue has been cleared.
self._mutex = threading.Lock()
            # self._cleared is used for more fine-grained locking.
self._cleared = False
def add_job(self, job):
"""Add a Job to the current job queue."""
self._jobs.put(job)
def clear_jobs(self):
"""Clear current Job queue and start fresh."""
if self._jobs.empty():
# Guards against getting stuck waiting for self._mutex when
# thread is waiting for self._jobs to not be empty.
return
with self._mutex:
self._cleared = True
self._jobs = PriorityQueue()
def run(self):
"""Run forever."""
while True:
try:
self._do_next_job()
except Exception:
                    # Module exceptions are caught earlier, so this is a bit
# more serious. Options are to either stop the main thread
# or continue this thread and hope that it won't happen
# again.
self.bot.error()
# Sleep a bit to guard against busy-looping and filling
# the log with useless error messages.
time.sleep(10.0) # seconds
def _do_next_job(self):
"""Wait until there is a job and do it."""
with self._mutex:
# Wait until the next job should be executed.
# This has to be a loop, because signals stop time.sleep().
while True:
job = self._jobs.peek()
difference = job.next_time - time.time()
duration = min(difference, self.min_reaction_time)
if duration <= 0:
break
with released(self._mutex):
time.sleep(duration)
self._cleared = False
job = self._jobs.get()
with released(self._mutex):
if job.func.thread:
t = threading.Thread(
target=self._call, args=(job.func,)
)
t.start()
else:
self._call(job.func)
job.next()
# If jobs were cleared during the call, don't put an old job
# into the new job queue.
if not self._cleared:
self._jobs.put(job)
def _call(self, func):
"""Wrapper for collecting errors from modules."""
# Willie.bot.call is way too specialized to be used instead.
try:
func(self.bot)
except Exception:
self.bot.error()
class Job(object):
"""Hold information about when a function should be called next.
        Job is a simple structure that holds information about when a function
should be called next.
They can be put in a priority queue, in which case the Job that should
be executed next is returned.
Calling the method next modifies the Job object for the next time it
should be executed. Current time is used to decide when the job should
be executed next so it should only be called right after the function
was called.
"""
max_catchup = 5
"""
This governs how much the scheduling of jobs is allowed
to get behind before they are simply thrown out to avoid
calling the same function too many times at once.
"""
def __init__(self, interval, func):
"""Initialize Job.
Args:
interval: number of seconds between calls to func
func: function to be called
"""
self.next_time = time.time() + interval
self.interval = interval
self.func = func
def next(self):
"""Update self.next_time with the assumption func was just called.
Returns: A modified job object.
"""
last_time = self.next_time
current_time = time.time()
delta = last_time + self.interval - current_time
if last_time > current_time + self.interval:
# Clock appears to have moved backwards. Reset
# the timer to avoid waiting for the clock to
# catch up to whatever time it was previously.
self.next_time = current_time + self.interval
elif delta < 0 and abs(delta) > self.interval * self.max_catchup:
# Execution of jobs is too far behind. Give up on
                # trying to catch up and reset the time, so that it
                # will only be repeated a maximum of
# self.max_catchup times.
self.next_time = current_time - \
self.interval * self.max_catchup
else:
self.next_time = last_time + self.interval
return self
def __cmp__(self, other):
"""Compare Job objects according to attribute next_time."""
return self.next_time - other.next_time
if py3:
def __lt__(self, other):
return self.next_time < other.next_time
def __gt__(self, other):
return self.next_time > other.next_time
def __str__(self):
"""Return a string representation of the Job object.
Example result:
<Job(2013-06-14 11:01:36.884000, 20s, <function upper at 0x02386BF0>)>
"""
iso_time = str(datetime.fromtimestamp(self.next_time))
return "<Job(%s, %ss, %s)>" % \
(iso_time, self.interval, self.func)
def __iter__(self):
"""This is an iterator. Never stops though."""
return self
def setup(self):
stderr("\nWelcome to Willie. Loading modules...\n\n")
self.callables = set()
self.shutdown_methods = set()
filenames = self.config.enumerate_modules()
# Coretasks is special. No custom user coretasks.
this_dir = os.path.dirname(os.path.abspath(__file__))
filenames['coretasks'] = os.path.join(this_dir, 'coretasks.py')
modules = []
error_count = 0
for name, filename in iteritems(filenames):
try:
module = imp.load_source(name, filename)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(filename, os.path.dirname(__file__))
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
else:
try:
if hasattr(module, 'setup'):
module.setup(self)
self.register(vars(module))
modules.append(name)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(
filename, os.path.dirname(__file__)
)
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error in %s setup procedure: %s (%s)"
% (name, e, raising_stmt))
if modules:
stderr('\n\nRegistered %d modules,' % (len(modules) - 1))
stderr('%d modules failed to load\n\n' % error_count)
else:
stderr("Warning: Couldn't find any modules")
self.bind_commands()
@staticmethod
def is_callable(obj):
"""Return true if object is a willie callable.
        The object must be callable and hashable. Furthermore, it must have
        either "commands", "rule" or "interval" as an attribute to mark it
        as a willie callable.
"""
if not callable(obj):
            # This check helps distinguish willie callables from objects
            # which just happen to have attributes named commands or rule.
return False
if (hasattr(obj, 'commands') or
hasattr(obj, 'rule') or
hasattr(obj, 'interval')):
return True
return False
@staticmethod
def is_shutdown(obj):
"""Return true if object is a willie shutdown method.
        The object must be callable and named shutdown.
"""
if (callable(obj) and
hasattr(obj, "__name__")
and obj.__name__ == 'shutdown'):
return True
return False
def register(self, variables):
"""Register all willie callables.
With the ``__dict__`` attribute from a Willie module, update or add the
trigger commands and rules, to allow the function to be triggered, and
shutdown methods, to allow the modules to be notified when willie is
quitting.
"""
for obj in itervalues(variables):
if self.is_callable(obj):
self.callables.add(obj)
if self.is_shutdown(obj):
self.shutdown_methods.add(obj)
def unregister(self, variables):
"""Unregister all willie callables in variables, and their bindings.
When unloading a module, this ensures that the unloaded modules will
not get called and that the objects can be garbage collected. Objects
that have not been registered are ignored.
Args:
variables -- A list of callable objects from a willie module.
"""
def remove_func(func, commands):
"""Remove all traces of func from commands."""
for func_list in itervalues(commands):
if func in func_list:
func_list.remove(func)
hostmask = "%s!%s@%s" % (self.nick, self.user, socket.gethostname())
willie = self.WillieWrapper(self, irc.Origin(self, hostmask, [], {}))
for obj in itervalues(variables):
if obj in self.callables:
self.callables.remove(obj)
for commands in itervalues(self.commands):
remove_func(obj, commands)
if obj in self.shutdown_methods:
try:
obj(willie)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" %
(obj.__module__, e)
)
self.shutdown_methods.remove(obj)
def sub(self, pattern):
"""Replace any of the following special directives in a function's rule expression:
$nickname -> the bot's nick
$nick -> the bot's nick followed by : or ,
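        For example, if the bot's nick is "Willie", "$nickname" in a rule is
        replaced with "Willie" and "$nick" with "Willie[,:]\s+".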
"""
nick = re.escape(self.nick)
# These replacements have significant order
subs = [('$nickname', r'{0}'.format(nick)),
('$nick', r'{0}[,:]\s+'.format(nick)),
]
for directive, subpattern in subs:
pattern = pattern.replace(directive, subpattern)
return pattern
def bind_commands(self):
self.commands = {'high': {}, 'medium': {}, 'low': {}}
self.scheduler.clear_jobs()
def bind(priority, regexp, func):
# Function name is no longer used for anything, as far as I know,
# but we're going to keep it around anyway.
if not hasattr(func, 'name'):
func.name = func.__name__
def trim_docstring(doc):
"""Clean up a docstring"""
if not doc:
return []
lines = doc.expandtabs().splitlines()
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
return trimmed
doc = trim_docstring(func.__doc__)
if hasattr(func, 'commands') and func.commands[0]:
example = None
if hasattr(func, 'example'):
if isinstance(func.example, basestring):
# Support old modules that add the attribute directly.
example = func.example
else:
# The new format is a list of dicts.
example = func.example[0]["example"]
example = example.replace('$nickname', str(self.nick))
if doc or example:
for command in func.commands:
self.doc[command] = (doc, example)
self.commands[priority].setdefault(regexp, []).append(func)
for func in self.callables:
if not hasattr(func, 'unblockable'):
func.unblockable = False
if not hasattr(func, 'priority'):
func.priority = 'medium'
if not hasattr(func, 'thread'):
func.thread = True
if not hasattr(func, 'event'):
func.event = 'PRIVMSG'
else:
func.event = func.event.upper()
            if not hasattr(func, 'rate'):
                func.rate = 0
if hasattr(func, 'rule'):
rules = func.rule
if isinstance(rules, basestring):
rules = [func.rule]
if isinstance(rules, list):
for rule in rules:
pattern = self.sub(rule)
flags = re.IGNORECASE
if rule.find("\n") != -1:
flags |= re.VERBOSE
regexp = re.compile(pattern, flags)
bind(func.priority, regexp, func)
elif isinstance(func.rule, tuple):
# 1) e.g. ('$nick', '(.*)')
if len(func.rule) == 2 and isinstance(func.rule[0], str):
prefix, pattern = func.rule
prefix = self.sub(prefix)
regexp = re.compile(prefix + pattern, re.I)
bind(func.priority, regexp, func)
# 2) e.g. (['p', 'q'], '(.*)')
elif len(func.rule) == 2 and \
isinstance(func.rule[0], list):
prefix = self.config.core.prefix
commands, pattern = func.rule
for command in commands:
command = r'(%s)\b(?: +(?:%s))?' % (
command, pattern
)
regexp = re.compile(prefix + command, re.I)
bind(func.priority, regexp, func)
# 3) e.g. ('$nick', ['p', 'q'], '(.*)')
elif len(func.rule) == 3:
prefix, commands, pattern = func.rule
prefix = self.sub(prefix)
for command in commands:
command = r'(%s) +' % command
regexp = re.compile(
prefix + command + pattern, re.I
)
bind(func.priority, regexp, func)
if hasattr(func, 'commands'):
for command in func.commands:
prefix = self.config.core.prefix
regexp = get_command_regexp(prefix, command)
bind(func.priority, regexp, func)
if hasattr(func, 'interval'):
for interval in func.interval:
job = Willie.Job(interval, func)
self.scheduler.add_job(job)
class WillieWrapper(object):
def __init__(self, willie, origin):
object.__setattr__(self, 'bot', willie)
object.__setattr__(self, 'origin', origin)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__)+classattrs+dir(self.bot)
def say(self, string, max_messages=1):
self.bot.msg(self.origin.sender, string, max_messages)
def reply(self, string, notice=False):
if isinstance(string, str) and not py3:
string = string.decode('utf8')
if notice:
self.notice(
'%s: %s' % (self.origin.nick, string),
self.origin.sender
)
else:
self.bot.msg(
self.origin.sender,
'%s: %s' % (self.origin.nick, string)
)
def action(self, string, recipient=None):
if recipient is None:
recipient = self.origin.sender
self.bot.msg(recipient, '\001ACTION %s\001' % string)
def notice(self, string, recipient=None):
if recipient is None:
recipient = self.origin.sender
self.write(('NOTICE', recipient), string)
def __getattr__(self, attr):
return getattr(self.bot, attr)
def __setattr__(self, attr, value):
return setattr(self.bot, attr, value)
class Trigger(unicode):
def __new__(cls, text, origin, bytes, match, event, args, self):
s = unicode.__new__(cls, text)
"""Is trigger from a channel or in PM"""
s.is_privmsg = origin.sender.is_nick()
s.sender = origin.sender
"""
The channel (or nick, in a private message) from which the
message was sent.
"""
s.hostmask = origin.hostmask
"""
Hostmask of the person who sent the message in the form
<nick>!<user>@<host>
"""
s.user = origin.user
"""Local username of the person who sent the message"""
s.nick = origin.nick
"""The ``Nick`` of the person who sent the message."""
s.event = event
"""
The IRC event (e.g. ``PRIVMSG`` or ``MODE``) which triggered the
message."""
s.bytes = bytes
"""
The text which triggered the message. Equivalent to
``Trigger.group(0)``.
"""
s.match = match
"""
The regular expression ``MatchObject_`` for the triggering line.
.. _MatchObject: http://docs.python.org/library/re.html#match-objects
"""
s.group = match.group
"""The ``group`` function of the ``match`` attribute.
See Python ``re_`` documentation for details."""
s.groups = match.groups
"""The ``groups`` function of the ``match`` attribute.
See Python ``re_`` documentation for details."""
s.args = args
"""
A tuple containing each of the arguments to an event. These are the
strings passed between the event name and the colon. For example,
setting ``mode -m`` on the channel ``#example``, args would be
``('#example', '-m')``
"""
s.tags = origin.tags
"""A map of the IRCv3 message tags on the message.
If the message had no tags, or the server does not support IRCv3
message tags, this will be an empty dict."""
def match_host_or_nick(pattern):
pattern = tools.get_hostmask_regex(pattern)
return bool(
pattern.match(origin.nick) or
pattern.match('@'.join((origin.nick, origin.host)))
)
s.admin = any(match_host_or_nick(item)
for item in self.config.core.get_list('admins'))
"""
True if the nick which triggered the command is in Willie's admin
list as defined in the config file.
"""
s.owner = match_host_or_nick(self.config.core.owner)
s.admin = s.admin or s.owner
s.host = origin.host
if s.sender is not s.nick: # no ops in PM
s.ops = self.ops.get(s.sender, [])
"""
                List of channel operators in the channel the message was
                received in
"""
s.halfplus = self.halfplus.get(s.sender, [])
"""
                List of channel half-operators in the channel the message was
                received in
"""
s.isop = (s.nick in s.ops or
s.nick in s.halfplus)
"""True if the user is half-op or an op"""
s.voices = self.voices.get(s.sender, [])
"""
                List of voiced users in the channel the message was
                received in
"""
s.isvoice = (s.nick in s.ops or
s.nick in s.halfplus or
s.nick in s.voices)
"""True if the user is voiced, has op, or has half-op"""
else:
s.isop = False
s.isvoice = False
s.ops = []
s.halfplus = []
s.voices = []
return s
def call(self, func, origin, willie, trigger):
nick = trigger.nick
if nick not in self.times:
self.times[nick] = dict()
if not trigger.admin and \
not func.unblockable and \
func.rate > 0 and \
func in self.times[nick]:
timediff = time.time() - self.times[nick][func]
if timediff < func.rate:
self.times[nick][func] = time.time()
self.debug(
__file__,
"%s prevented from using %s in %s: %d < %d" % (
trigger.nick, func.__name__, trigger.sender,
timediff, func.rate
),
"verbose"
)
return
try:
exit_code = func(willie, trigger)
except Exception:
exit_code = None
self.error(origin, trigger)
if exit_code != module.NOLIMIT:
self.times[nick][func] = time.time()
def limit(self, origin, func):
if origin.sender and not origin.sender.is_nick():
if self.config.has_section('limit'):
limits = self.config.limit.get(origin.sender)
if limits and (func.__module__ not in limits):
return True
return False
def dispatch(self, origin, text, args):
event, args = args[0], args[1:]
wrapper = self.WillieWrapper(self, origin)
if self.config.core.nick_blocks or self.config.core.host_blocks:
nick_blocked = self._nick_blocked(origin.nick)
host_blocked = self._host_blocked(origin.host)
else:
nick_blocked = host_blocked = None
list_of_blocked_functions = []
for priority in ('high', 'medium', 'low'):
items = self.commands[priority].items()
for regexp, funcs in items:
match = regexp.match(text)
if not match:
continue
trigger = self.Trigger(
text, origin, text, match, event, args, self
)
for func in funcs:
if (not trigger.admin and
not func.unblockable and
(nick_blocked or host_blocked)):
function_name = "%s.%s" % (
func.__module__, func.__name__
)
list_of_blocked_functions.append(function_name)
continue
if event != func.event:
continue
if self.limit(origin, func):
continue
if func.thread:
targs = (func, origin, wrapper, trigger)
t = threading.Thread(target=self.call, args=targs)
t.start()
else:
self.call(func, origin, wrapper, trigger)
if list_of_blocked_functions:
if nick_blocked and host_blocked:
block_type = 'both'
elif nick_blocked:
block_type = 'nick'
else:
block_type = 'host'
self.debug(
__file__,
"[%s]%s prevented from using %s." % (
block_type,
origin.nick,
', '.join(list_of_blocked_functions)
),
"verbose"
)
def _host_blocked(self, host):
bad_masks = self.config.core.get_list('host_blocks')
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.get_list('nick_blocks')
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Nick(bad_nick) == nick):
return True
return False
def debug(self, tag, text, level):
"""Sends an error to Willie's configured ``debug_target``.
Args:
tag - What the msg will be tagged as. It is recommended to pass
__file__ as the tag. If the file exists, a relative path is
used as the file. Otherwise the tag is used as it is.
text - Body of the message.
level - Either verbose, warning or always. Configuration option
config.verbose which levels are ignored.
Returns: True if message was sent.
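        Example (illustrative values): self.debug(__file__, 'config reloaded',
        'verbose') only produces output when config.verbose is set to 'verbose'.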
"""
if not self.config.core.verbose:
self.config.core.verbose = 'warning'
if not self.config.core.debug_target:
self.config.core.debug_target = 'stdio'
debug_target = self.config.core.debug_target
verbosity = self.config.core.verbose
if os.path.exists(tag):
tag = os.path.relpath(tag, os.path.dirname(__file__))
debug_msg = "[%s] %s" % (tag, text)
output_on = {
'verbose': ['verbose'],
'warning': ['verbose', 'warning'],
'always': ['verbose', 'warning', 'always'],
}
if level in output_on and verbosity in output_on[level]:
if debug_target == 'stdio':
print(debug_msg)
else:
self.msg(debug_target, debug_msg)
return True
else:
return False
def _shutdown(self):
stderr(
'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
)
hostmask = "%s!%s@%s" % (self.nick, self.user, socket.gethostname())
willie = self.WillieWrapper(self, irc.Origin(self, hostmask, [], {}))
for shutdown_method in self.shutdown_methods:
try:
stderr(
"calling %s.%s" % (
shutdown_method.__module__, shutdown_method.__name__,
)
)
shutdown_method(willie)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" % (
shutdown_method.__module__, e
)
)
def cap_req(self, module_name, capability, failure_callback):
"""Tell Willie to request a capability when it starts.
By prefixing the capability with `-`, it will be ensured that the
        capability is not enabled. Similarly, by prefixing the capability with
`=`, it will be ensured that the capability is enabled. Requiring and
disabling is "first come, first served"; if one module requires a
capability, and another prohibits it, this function will raise an
exception in whichever module loads second. An exception will also be
raised if the module is being loaded after the bot has already started,
and the request would change the set of enabled capabilities.
If the capability is not prefixed, and no other module prohibits it, it
will be requested. Otherwise, it will not be requested. Since
capability requests that are not mandatory may be rejected by the
server, as well as by other modules, a module which makes such a
request should account for that possibility.
The actual capability request to the server is handled after the
completion of this function. In the event that the server denies a
request, the `failure_callback` function will be called, if provided.
The arguments will be a `Willie` object, and the capability which was
rejected. This can be used to disable callables which rely on the
capability.
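        For example, a module that would like (but does not require) the
        "account-notify" capability might call (module and callback names
        here are illustrative):
            bot.cap_req('my_module', 'account-notify', account_notify_denied)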
"""
#TODO raise better exceptions
cap = capability[1:]
prefix = capability[0]
if prefix == '-':
if self.connection_registered and cap in self.enabled_capabilities:
raise Exception('Can not change capabilities after server '
'connection has been completed.')
entry = self._cap_reqs.get(cap, [])
if any((ent[0] != '-' for ent in entry)):
raise Exception('Capability conflict')
entry.append((prefix, module_name, failure_callback))
self._cap_reqs[cap] = entry
else:
if prefix != '=':
cap = capability
prefix = ''
if self.connection_registered and (cap not in
self.enabled_capabilities):
raise Exception('Can not change capabilities after server '
'connection has been completed.')
entry = self._cap_reqs.get(cap, [])
# Non-mandatory will callback at the same time as if the server
# rejected it.
if any((ent[0] == '-' for ent in entry)) and prefix == '=':
raise Exception('Capability conflict')
entry.append((prefix, module_name, failure_callback))
self._cap_reqs[cap] = entry
|
run_summarization.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is the top-level file to train, evaluate or test your summarization model"""
import sys
import time
import os
import tensorflow as tf
import numpy as np
from collections import namedtuple
from data import Vocab
from batcher import Batcher
from model import SummarizationModel
from decode import BeamSearchDecoder
import util
from glob import glob
from tensorflow.python import debug as tf_debug
from replay_buffer import ReplayBuffer
from dqn import DQN
from threading import Thread
import pickle
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import bernoulli
FLAGS = tf.app.flags.FLAGS
# Where to find data
tf.app.flags.DEFINE_string('data_path', '', 'Path expression to tf.Example datafiles. Can include wildcards to access multiple datafiles.')
tf.app.flags.DEFINE_string('vocab_path', '', 'Path expression to text vocabulary file.')
# Important settings
tf.app.flags.DEFINE_string('mode', 'train', 'must be one of train/eval/decode')
tf.app.flags.DEFINE_boolean('single_pass', False, 'For decode mode only. If True, run eval on the full dataset using a fixed checkpoint, i.e. take the current checkpoint, and use it to produce one summary for each example in the dataset, write the summaries to file and then get ROUGE scores for the whole dataset. If False (default), run concurrent decoding, i.e. repeatedly load latest checkpoint, use it to produce summaries for randomly-chosen examples and log the results to screen, indefinitely.')
tf.app.flags.DEFINE_integer('decode_after', 0, 'skip already decoded docs')
# Where to save output
tf.app.flags.DEFINE_string('log_root', '', 'Root directory for all logging.')
tf.app.flags.DEFINE_string('exp_name', '', 'Name for experiment. Logs will be saved in a directory with this name, under log_root.')
# Hyperparameters
tf.app.flags.DEFINE_integer('enc_hidden_dim', 256, 'dimension of RNN hidden states')
tf.app.flags.DEFINE_integer('dec_hidden_dim', 256, 'dimension of RNN hidden states')
tf.app.flags.DEFINE_integer('emb_dim', 128, 'dimension of word embeddings')
tf.app.flags.DEFINE_integer('batch_size', 64, 'minibatch size')
tf.app.flags.DEFINE_integer('max_enc_steps', 400, 'max timesteps of encoder (max source text tokens)')
tf.app.flags.DEFINE_integer('max_dec_steps', 100, 'max timesteps of decoder (max summary tokens)')
tf.app.flags.DEFINE_integer('beam_size', 4, 'beam size for beam search decoding.')
tf.app.flags.DEFINE_integer('min_dec_steps', 35, 'Minimum sequence length of generated summary. Applies only for beam search decoding mode')
tf.app.flags.DEFINE_integer('max_iter', 55000, 'max number of iterations')
tf.app.flags.DEFINE_integer('vocab_size', 50000, 'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.')
tf.app.flags.DEFINE_float('lr', 0.15, 'learning rate')
tf.app.flags.DEFINE_float('adagrad_init_acc', 0.1, 'initial accumulator value for Adagrad')
tf.app.flags.DEFINE_float('rand_unif_init_mag', 0.02, 'magnitude for lstm cells random uniform initialization')
tf.app.flags.DEFINE_float('trunc_norm_init_std', 1e-4, 'std of trunc norm init, used for initializing everything else')
tf.app.flags.DEFINE_float('max_grad_norm', 2.0, 'for gradient clipping')
tf.app.flags.DEFINE_string('embedding', None, 'path to the pre-trained embedding file')
tf.app.flags.DEFINE_integer('gpu_num', 0, 'which gpu to use to train the model')
# Pointer-generator or baseline model
tf.app.flags.DEFINE_boolean('pointer_gen', True, 'If True, use pointer-generator model. If False, use baseline model.')
# Pointer-generator with Self-Critic policy gradient: https://arxiv.org/pdf/1705.04304.pdf
tf.app.flags.DEFINE_boolean('rl_training', False, 'Use policy-gradient training by collecting rewards at the end of sequence.')
tf.app.flags.DEFINE_boolean('convert_to_reinforce_model', False, 'Convert a pointer model to a reinforce model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _rl_init appended) that will be ready to run with rl_training turned on.')
tf.app.flags.DEFINE_boolean('intradecoder', False, 'Use intradecoder attention or not')
tf.app.flags.DEFINE_boolean('use_temporal_attention', False, 'Whether to use temporal attention or not')
tf.app.flags.DEFINE_boolean('matrix_attention', False, 'Use matrix attention, Eq. 2 https://arxiv.org/pdf/1705.04304.pdf')
tf.app.flags.DEFINE_float('eta', 0, 'RL/MLE scaling factor, 1 means use RL loss, 0 means use MLE loss')
tf.app.flags.DEFINE_boolean('fixed_eta', False, 'Use fixed value for eta or adaptive based on global step')
tf.app.flags.DEFINE_float('gamma', 0.99, 'discount factor')
tf.app.flags.DEFINE_string('reward_function', 'rouge_l/f_score', 'either bleu or one of the rouge measures (rouge_1/f_score,rouge_2/f_score,rouge_l/f_score)')
# parameters of DDQN model
tf.app.flags.DEFINE_boolean('ac_training', False, 'Use Actor-Critic learning by DDQN.')
tf.app.flags.DEFINE_boolean('dqn_scheduled_sampling', False, 'Whether to use scheduled sampling to use estimates of dqn model vs the actual q-estimates values')
tf.app.flags.DEFINE_string('dqn_layers', '512,256,128', 'DQN dense hidden layer size, will create three dense layers with 512, 256, and 128 size')
tf.app.flags.DEFINE_integer('dqn_replay_buffer_size', 100000, 'Size of the replay buffer')
tf.app.flags.DEFINE_integer('dqn_batch_size', 100, 'Batch size for training the DDQN model')
tf.app.flags.DEFINE_integer('dqn_target_update', 10000, 'Update target Q network every 10000 steps')
tf.app.flags.DEFINE_integer('dqn_sleep_time', 2, 'Train DDQN model every 2 seconds')
tf.app.flags.DEFINE_integer('dqn_gpu_num', 0, 'GPU number to train the DDQN')
tf.app.flags.DEFINE_boolean('dueling_net', True, 'Whether to use Duelling Network to train the model') # https://arxiv.org/pdf/1511.06581.pdf
tf.app.flags.DEFINE_boolean('dqn_polyak_averaging', True, 'Whether to use polyak averaging to update the target network parameters')
tf.app.flags.DEFINE_boolean('calculate_true_q', False, "Whether to use true Q-values to train DQN or use DQN's estimates to train it")
tf.app.flags.DEFINE_boolean('dqn_pretrain', False, "Pretrain the DDQN network with fixed Actor model")
tf.app.flags.DEFINE_integer('dqn_pretrain_steps', 10000, 'Number of steps to pre-train the DDQN')
#scheduled sampling parameters, https://arxiv.org/pdf/1506.03099.pdf
# At each time step t and for each sequence in the batch, we get the input to next decoding step by either
# (1) sampling from the final distribution at (t-1), or
# (2) reading from input_decoder_embedding.
# We do (1) with probability sampling_probability and (2) with 1 - sampling_probability.
# Using sampling_probability=0.0 is equivalent to using only the ground truth data (no sampling).
# Using sampling_probability=1.0 is equivalent to doing inference by only relying on the sampled token generated at each decoding step
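# A rough sketch of the idea (tensor names and shapes below are illustrative,
# not the exact code used in the model):
#   coin = bernoulli.Bernoulli(probs=sampling_probability).sample([batch_size])
#   next_inp = tf.where(tf.cast(coin, tf.bool), sampled_emb, ground_truth_emb)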
tf.app.flags.DEFINE_boolean('scheduled_sampling', False, 'whether to do scheduled sampling or not')
tf.app.flags.DEFINE_string('decay_function', 'linear','linear, exponential, inv_sigmoid') #### TODO: implement this
tf.app.flags.DEFINE_float('sampling_probability', 0, 'epsilon value for choosing ground-truth or model output')
tf.app.flags.DEFINE_boolean('fixed_sampling_probability', False, 'Whether to use fixed sampling probability or adaptive based on global step')
tf.app.flags.DEFINE_boolean('hard_argmax', True, 'Whether to use soft argmax or hard argmax')
tf.app.flags.DEFINE_boolean('greedy_scheduled_sampling', False, 'Whether to use greedy approach or sample for the output, if True it uses greedy')
tf.app.flags.DEFINE_boolean('E2EBackProp', False, 'Whether to use E2EBackProp algorithm to solve exposure bias')
tf.app.flags.DEFINE_float('alpha', 1, 'soft argmax argument')
tf.app.flags.DEFINE_integer('k', 1, 'number of samples')
# Coverage hyperparameters
tf.app.flags.DEFINE_boolean('coverage', False, 'Use coverage mechanism. Note, the experiments reported in the ACL paper train WITHOUT coverage until converged, and then train for a short phase WITH coverage afterwards. i.e. to reproduce the results in the ACL paper, turn this off for most of training then turn on for a short phase at the end.')
tf.app.flags.DEFINE_float('cov_loss_wt', 1.0, 'Weight of coverage loss (lambda in the paper). If zero, then no incentive to minimize coverage loss.')
# Utility flags, for restoring and changing checkpoints
tf.app.flags.DEFINE_boolean('convert_to_coverage_model', False, 'Convert a non-coverage model to a coverage model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _cov_init appended) that will be ready to run with coverage flag turned on, for the coverage training stage.')
tf.app.flags.DEFINE_boolean('restore_best_model', False, 'Restore the best model in the eval/ dir and save it in the train/ dir, ready to be used for further training. Useful for early stopping, or if your training checkpoint has become corrupted with e.g. NaN values.')
# Debugging. See https://www.tensorflow.org/programmers_guide/debugger
tf.app.flags.DEFINE_boolean('debug', False, "Run in tensorflow's debug mode (watches for NaN/inf values)")
class Seq2Seq(object):
def calc_running_avg_loss(self, loss, running_avg_loss, step, decay=0.99):
"""Calculate the running average loss via exponential decay.
This is used to implement early stopping w.r.t. a more smooth loss curve than the raw loss curve.
Args:
loss: loss on the most recent eval step
running_avg_loss: running_avg_loss so far
summary_writer: FileWriter object to write for tensorboard
step: training iteration step
decay: rate of exponential decay, a float between 0 and 1. Larger is smoother.
Returns:
running_avg_loss: new running average loss
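    Example (illustrative numbers): with decay=0.99, a previous running average
    of 5.0 and a new loss of 3.0 gives 0.99*5.0 + 0.01*3.0 = 4.98.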
"""
if running_avg_loss == 0: # on the first iteration just take the loss
running_avg_loss = loss
else:
running_avg_loss = running_avg_loss * decay + (1 - decay) * loss
running_avg_loss = min(running_avg_loss, 12) # clip
loss_sum = tf.Summary()
tag_name = 'running_avg_loss/decay=%f' % (decay)
loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)
self.summary_writer.add_summary(loss_sum, step)
tf.logging.info('running_avg_loss: %f', running_avg_loss)
return running_avg_loss
def restore_best_model(self):
"""Load bestmodel file from eval directory, add variables for adagrad, and save to train directory"""
tf.logging.info("Restoring bestmodel for training...")
# Initialize all vars in the model
sess = tf.Session(config=util.get_config())
print("Initializing all variables...")
sess.run(tf.initialize_all_variables())
# Restore the best model from eval dir
saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
print("Restoring all non-adagrad variables from best model in eval dir...")
curr_ckpt = util.load_ckpt(saver, sess, "eval")
print("Restored %s." % curr_ckpt)
# Save this model to train dir and quit
new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
print("Saving model to %s..." % (new_fname))
new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables
new_saver.save(sess, new_fname)
print("Saved.")
exit()
def restore_best_eval_model(self):
# load best evaluation loss so far
best_loss = None
best_step = None
# goes through all event files and select the best loss achieved and return it
event_files = sorted(glob('{}/eval/events*'.format(FLAGS.log_root)))
for ef in event_files:
try:
for e in tf.train.summary_iterator(ef):
for v in e.summary.value:
step = e.step
if 'running_avg_loss/decay' in v.tag:
running_avg_loss = v.simple_value
if best_loss is None or running_avg_loss < best_loss:
best_loss = running_avg_loss
best_step = step
except:
continue
    tf.logging.info('restoring best loss from the current logs: {}\tstep: {}'.format(best_loss, best_step))
return best_loss
def convert_to_coverage_model(self):
"""Load non-coverage checkpoint, add initialized extra variables for coverage, and save as new checkpoint"""
tf.logging.info("converting non-coverage model to coverage model..")
# initialize an entire coverage model from scratch
sess = tf.Session(config=util.get_config())
print("initializing everything...")
sess.run(tf.global_variables_initializer())
# load all non-coverage weights from checkpoint
saver = tf.train.Saver([v for v in tf.global_variables() if "coverage" not in v.name and "Adagrad" not in v.name])
print("restoring non-coverage variables...")
curr_ckpt = util.load_ckpt(saver, sess)
print("restored.")
# save this model and quit
new_fname = curr_ckpt + '_cov_init'
print("saving model to %s..." % (new_fname))
new_saver = tf.train.Saver() # this one will save all variables that now exist
new_saver.save(sess, new_fname)
print("saved.")
exit()
def convert_to_reinforce_model(self):
"""Load non-reinforce checkpoint, add initialized extra variables for reinforce, and save as new checkpoint"""
tf.logging.info("converting non-reinforce model to reinforce model..")
# initialize an entire reinforce model from scratch
sess = tf.Session(config=util.get_config())
print("initializing everything...")
sess.run(tf.global_variables_initializer())
# load all non-reinforce weights from checkpoint
saver = tf.train.Saver([v for v in tf.global_variables() if "reinforce" not in v.name and "Adagrad" not in v.name])
print("restoring non-reinforce variables...")
curr_ckpt = util.load_ckpt(saver, sess)
print("restored.")
# save this model and quit
new_fname = curr_ckpt + '_rl_init'
print("saving model to %s..." % (new_fname))
new_saver = tf.train.Saver() # this one will save all variables that now exist
new_saver.save(sess, new_fname)
print("saved.")
exit()
def setup_training(self):
"""Does setup before starting training (run_training)"""
train_dir = os.path.join(FLAGS.log_root, "train")
if not os.path.exists(train_dir): os.makedirs(train_dir)
if FLAGS.ac_training:
dqn_train_dir = os.path.join(FLAGS.log_root, "dqn", "train")
if not os.path.exists(dqn_train_dir): os.makedirs(dqn_train_dir)
#replaybuffer_pcl_path = os.path.join(FLAGS.log_root, "replaybuffer.pcl")
#if not os.path.exists(dqn_target_train_dir): os.makedirs(dqn_target_train_dir)
self.model.build_graph() # build the graph
if FLAGS.convert_to_reinforce_model:
assert (FLAGS.rl_training or FLAGS.ac_training), "To convert your pointer model to a reinforce model, run with convert_to_reinforce_model=True and either rl_training=True or ac_training=True"
self.convert_to_reinforce_model()
if FLAGS.convert_to_coverage_model:
assert FLAGS.coverage, "To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True"
self.convert_to_coverage_model()
if FLAGS.restore_best_model:
self.restore_best_model()
saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time
# Loads pre-trained word-embedding. By default the model learns the embedding.
if FLAGS.embedding:
self.vocab.LoadWordEmbedding(FLAGS.embedding, FLAGS.emb_dim)
word_vector = self.vocab.getWordEmbedding()
self.sv = tf.train.Supervisor(logdir=train_dir,
is_chief=True,
saver=saver,
summary_op=None,
save_summaries_secs=60, # save summaries for tensorboard every 60 secs
save_model_secs=60, # checkpoint every 60 secs
global_step=self.model.global_step,
init_feed_dict= {self.model.embedding_place:word_vector} if FLAGS.embedding else None
)
self.summary_writer = self.sv.summary_writer
self.sess = self.sv.prepare_or_wait_for_session(config=util.get_config())
if FLAGS.ac_training:
tf.logging.info('DDQN building graph')
t1 = time.time()
# We create a separate graph for DDQN
self.dqn_graph = tf.Graph()
with self.dqn_graph.as_default():
self.dqn.build_graph() # build dqn graph
tf.logging.info('building current network took {} seconds'.format(time.time()-t1))
self.dqn_target.build_graph() # build dqn target graph
tf.logging.info('building target network took {} seconds'.format(time.time()-t1))
dqn_saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time
self.dqn_sv = tf.train.Supervisor(logdir=dqn_train_dir,
is_chief=True,
saver=dqn_saver,
summary_op=None,
save_summaries_secs=60, # save summaries for tensorboard every 60 secs
save_model_secs=60, # checkpoint every 60 secs
global_step=self.dqn.global_step,
)
self.dqn_summary_writer = self.dqn_sv.summary_writer
self.dqn_sess = self.dqn_sv.prepare_or_wait_for_session(config=util.get_config())
''' #### TODO: try loading a previously saved replay buffer
# right now this doesn't work due to running DQN on a thread
if os.path.exists(replaybuffer_pcl_path):
tf.logging.info('Loading Replay Buffer...')
try:
self.replay_buffer = pickle.load(open(replaybuffer_pcl_path, "rb"))
tf.logging.info('Replay Buffer loaded...')
except:
tf.logging.info('Couldn\'t load Replay Buffer file...')
self.replay_buffer = ReplayBuffer(self.dqn_hps)
else:
self.replay_buffer = ReplayBuffer(self.dqn_hps)
tf.logging.info("Building DDQN took {} seconds".format(time.time()-t1))
'''
self.replay_buffer = ReplayBuffer(self.dqn_hps)
tf.logging.info("Preparing or waiting for session...")
tf.logging.info("Created session.")
try:
self.run_training() # this is an infinite loop until interrupted
except (KeyboardInterrupt, SystemExit):
tf.logging.info("Caught keyboard interrupt on worker. Stopping supervisor...")
self.sv.stop()
if FLAGS.ac_training:
self.dqn_sv.stop()
def run_training(self):
"""Repeatedly runs training iterations, logging loss to screen and writing summaries"""
tf.logging.info("Starting run_training")
if FLAGS.debug: # start the tensorflow debugger
self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
self.train_step = 0
if FLAGS.ac_training:
# DDQN training is done asynchronously along with model training
tf.logging.info('Starting DQN training thread...')
self.dqn_train_step = 0
self.thrd_dqn_training = Thread(target=self.dqn_training)
self.thrd_dqn_training.daemon = True
self.thrd_dqn_training.start()
watcher = Thread(target=self.watch_threads)
watcher.daemon = True
watcher.start()
# starting the main thread
tf.logging.info('Starting Seq2Seq training...')
while True: # repeats until interrupted
batch = self.batcher.next_batch()
t0=time.time()
if FLAGS.ac_training:
# For DDQN, we first collect the model output to calculate the reward and Q-estimates
# Then we fix the estimation either using our target network or using the true Q-values
# This process will usually take time and we are working on improving it.
transitions = self.model.collect_dqn_transitions(self.sess, batch, self.train_step, batch.max_art_oovs) # len(batch_size * k * max_dec_steps)
tf.logging.info('Q-values collection time: {}'.format(time.time()-t0))
# whenever we are working with the DDQN, we switch to the DDQN graph rather than the default graph
with self.dqn_graph.as_default():
batch_len = len(transitions)
# we use current decoder state to predict q_estimates, use_state_prime = False
b = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = False, max_art_oovs = batch.max_art_oovs)
# we also get the next decoder state to correct the estimation, use_state_prime = True
b_prime = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = True, max_art_oovs = batch.max_art_oovs)
# use current DQN to estimate values from current decoder state
dqn_results = self.dqn.run_test_steps(sess=self.dqn_sess, x= b._x, return_best_action=True)
q_estimates = dqn_results['estimates'] # shape (len(transitions), vocab_size)
dqn_best_action = dqn_results['best_action']
#dqn_q_estimate_loss = dqn_results['loss']
# use target DQN to estimate values for the next decoder state
dqn_target_results = self.dqn_target.run_test_steps(self.dqn_sess, x= b_prime._x)
q_vals_new_t = dqn_target_results['estimates'] # shape (len(transitions), vocab_size)
# we need to expand the q_estimates to match the input batch max_art_oov
# we use the q_estimate of UNK token for all the OOV tokens
q_estimates = np.concatenate([q_estimates,
np.reshape(q_estimates[:,0],[-1,1])*np.ones((len(transitions),batch.max_art_oovs))],axis=-1)
# modify Q-estimates using the result collected from current and target DQN.
# check algorithm 5 in the paper for more info: https://arxiv.org/pdf/1805.09461.pdf
for i, tr in enumerate(transitions):
if tr.done:
q_estimates[i][tr.action] = tr.reward
else:
q_estimates[i][tr.action] = tr.reward + FLAGS.gamma * q_vals_new_t[i][dqn_best_action[i]]
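# The two lines above follow the Double-DQN style target from the cited paper (Algorithm 5,
# https://arxiv.org/pdf/1805.09461.pdf): terminal transitions keep only the reward, otherwise
#   Q(s, a) <- r + gamma * Q_target(s', a*)
# where a* is the action picked by the current network (dqn_best_action) and Q_target is
# evaluated on the next decoder state, decoupling action selection from value estimation.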
# use scheduled sampling to decide whether to use the true Q-values or the DDQN estimates
if FLAGS.dqn_scheduled_sampling:
q_estimates = self.scheduled_sampling(batch_len, FLAGS.sampling_probability, b._y_extended, q_estimates)
if not FLAGS.calculate_true_q:
# when we are not training DDQN based on true Q-values,
# we need to update Q-values in our transitions based on the q_estimates we collected from DQN current network.
for trans, q_val in zip(transitions,q_estimates):
trans.q_values = q_val # each have the size vocab_extended
q_estimates = np.reshape(q_estimates, [FLAGS.batch_size, FLAGS.k, FLAGS.max_dec_steps, -1]) # shape (batch_size, k, max_dec_steps, vocab_size_extended)
# Once we are done with modifying Q-values, we can use them to train the DDQN model.
# In this paper, we use a prioritized experience replay buffer which always selects higher-quality states
# to train the DDQN. The following line will add batch_size * max_dec_steps experiences to the replay buffer.
# As mentioned before, the DDQN training is asynchronous. Therefore, once the related queues for DDQN training
# are full, the DDQN will start the training.
self.replay_buffer.add(transitions)
# If dqn_pretrain flag is on, it means that we use a fixed Actor to only collect experiences for
# DDQN pre-training
if FLAGS.dqn_pretrain:
tf.logging.info('RUNNING DQN PRETRAIN: adding data to replay buffer only...')
continue
# if not, use the q_estimation to update the loss.
results = self.model.run_train_steps(self.sess, batch, self.train_step, q_estimates)
else:
results = self.model.run_train_steps(self.sess, batch, self.train_step)
t1=time.time()
# get the summaries and iteration number so we can write summaries to tensorboard
summaries = results['summaries'] # we will write these summaries to tensorboard using summary_writer
self.train_step = results['global_step'] # we need this to update our running average loss
tf.logging.info('seconds for training step {}: {}'.format(self.train_step, t1-t0))
printer_helper = {}
printer_helper['pgen_loss']= results['pgen_loss']
if FLAGS.coverage:
printer_helper['coverage_loss'] = results['coverage_loss']
if FLAGS.rl_training or FLAGS.ac_training:
printer_helper['rl_cov_total_loss']= results['reinforce_cov_total_loss']
else:
printer_helper['pointer_cov_total_loss'] = results['pointer_cov_total_loss']
if FLAGS.rl_training or FLAGS.ac_training:
printer_helper['shared_loss'] = results['shared_loss']
printer_helper['rl_loss'] = results['rl_loss']
printer_helper['rl_avg_logprobs'] = results['rl_avg_logprobs']
if FLAGS.rl_training:
printer_helper['sampled_r'] = np.mean(results['sampled_sentence_r_values'])
printer_helper['greedy_r'] = np.mean(results['greedy_sentence_r_values'])
printer_helper['r_diff'] = printer_helper['sampled_r'] - printer_helper['greedy_r']
if FLAGS.ac_training:
printer_helper['dqn_loss'] = np.mean(self.avg_dqn_loss) if len(self.avg_dqn_loss)>0 else 0
for (k,v) in printer_helper.items():
if not np.isfinite(v):
raise Exception("{} is not finite. Stopping.".format(k))
tf.logging.info('{}: {}\t'.format(k,v))
tf.logging.info('-------------------------------------------')
self.summary_writer.add_summary(summaries, self.train_step) # write the summaries
if self.train_step % 100 == 0: # flush the summary writer every so often
self.summary_writer.flush()
if FLAGS.ac_training:
self.dqn_summary_writer.flush()
if self.train_step > FLAGS.max_iter: break
def dqn_training(self):
""" training the DDQN network."""
try:
while True:
if self.dqn_train_step == FLAGS.dqn_pretrain_steps: raise SystemExit()
_t = time.time()
self.avg_dqn_loss = []
avg_dqn_target_loss = []
# Get a batch of size dqn_batch_size from replay buffer to train the model
dqn_batch = self.replay_buffer.next_batch()
if dqn_batch is None:
tf.logging.info('replay buffer not loaded enough yet...')
time.sleep(60)
continue
# Run train step for Current DQN model and collect the results
dqn_results = self.dqn.run_train_steps(self.dqn_sess, dqn_batch)
# Run test step for Target DQN model and collect the results and monitor the difference in loss between the two
dqn_target_results = self.dqn_target.run_test_steps(self.dqn_sess, x=dqn_batch._x, y=dqn_batch._y, return_loss=True)
self.dqn_train_step = dqn_results['global_step']
self.dqn_summary_writer.add_summary(dqn_results['summaries'], self.dqn_train_step) # write the summaries
self.avg_dqn_loss.append(dqn_results['loss'])
avg_dqn_target_loss.append(dqn_target_results['loss'])
self.dqn_train_step = self.dqn_train_step + 1
tf.logging.info('seconds for training dqn model: {}'.format(time.time()-_t))
# UPDATING TARGET DDQN NETWORK WITH CURRENT MODEL
with self.dqn_graph.as_default():
current_model_weights = self.dqn_sess.run([self.dqn.model_trainables])[0] # get weights of current model
self.dqn_target.run_update_weights(self.dqn_sess, self.dqn_train_step, current_model_weights) # update target model weights with current model weights
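# Note: run_update_weights syncs the target network from the current network; depending on the
# dqn_polyak_averaging flag this is presumably either a hard copy (theta_target <- theta) or a
# soft/Polyak update (theta_target <- tau*theta + (1-tau)*theta_target). Either way it is the
# usual DDQN trick to keep the bootstrapped targets stable.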
tf.logging.info('DQN loss at step {}: {}'.format(self.dqn_train_step, np.mean(self.avg_dqn_loss)))
tf.logging.info('DQN Target loss at step {}: {}'.format(self.dqn_train_step, np.mean(avg_dqn_target_loss)))
# sleeping is required if you want the keyboard interruption to work
time.sleep(FLAGS.dqn_sleep_time)
except (KeyboardInterrupt, SystemExit):
tf.logging.info("Caught keyboard interrupt on worker. Stopping supervisor...")
self.sv.stop()
self.dqn_sv.stop()
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
if not self.thrd_dqn_training.is_alive(): # if the thread is dead
tf.logging.error('Found DQN Learning thread dead. Restarting.')
self.thrd_dqn_training = Thread(target=self.dqn_training)
self.thrd_dqn_training.daemon = True
self.thrd_dqn_training.start()
def run_eval(self):
"""Repeatedly runs eval iterations, logging to screen and writing summaries. Saves the model with the best loss seen so far."""
self.model.build_graph() # build the graph
saver = tf.train.Saver(max_to_keep=3) # we will keep 3 best checkpoints at a time
sess = tf.Session(config=util.get_config())
if FLAGS.embedding:
sess.run(tf.global_variables_initializer(),feed_dict={self.model.embedding_place:self.word_vector})
eval_dir = os.path.join(FLAGS.log_root, "eval") # make a subdir of the root dir for eval data
bestmodel_save_path = os.path.join(eval_dir, 'bestmodel') # this is where checkpoints of best models are saved
self.summary_writer = tf.summary.FileWriter(eval_dir)
if FLAGS.ac_training:
tf.logging.info('DDQN building graph')
t1 = time.time()
dqn_graph = tf.Graph()
with dqn_graph.as_default():
self.dqn.build_graph() # build dqn graph
tf.logging.info('building current network took {} seconds'.format(time.time()-t1))
self.dqn_target.build_graph() # build dqn target graph
tf.logging.info('building target network took {} seconds'.format(time.time()-t1))
dqn_saver = tf.train.Saver(max_to_keep=3) # keep 3 checkpoints at a time
dqn_sess = tf.Session(config=util.get_config())
dqn_train_step = 0
replay_buffer = ReplayBuffer(self.dqn_hps)
running_avg_loss = 0 # the eval job keeps a smoother, running average loss to tell it when to implement early stopping
best_loss = self.restore_best_eval_model() # will hold the best loss achieved so far
train_step = 0
while True:
_ = util.load_ckpt(saver, sess) # load a new checkpoint
if FLAGS.ac_training:
_ = util.load_dqn_ckpt(dqn_saver, dqn_sess) # load a new checkpoint
processed_batch = 0
avg_losses = []
# evaluate for 100 * batch_size before comparing the loss
# we do this due to memory constraints; it is best to run eval on a different machine with a large batch size
while processed_batch < 100*FLAGS.batch_size:
processed_batch += FLAGS.batch_size
batch = self.batcher.next_batch() # get the next batch
if FLAGS.ac_training:
t0 = time.time()
transitions = self.model.collect_dqn_transitions(sess, batch, train_step, batch.max_art_oovs) # len(batch_size * k * max_dec_steps)
tf.logging.info('Q values collection time: {}'.format(time.time()-t0))
with dqn_graph.as_default():
# if using true Q-value to train DQN network,
# we do this as the pre-training for the DQN network to get better estimates
batch_len = len(transitions)
b = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = True, max_art_oovs = batch.max_art_oovs)
b_prime = ReplayBuffer.create_batch(self.dqn_hps, transitions,len(transitions), use_state_prime = True, max_art_oovs = batch.max_art_oovs)
dqn_results = self.dqn.run_test_steps(sess=dqn_sess, x= b._x, return_best_action=True)
q_estimates = dqn_results['estimates'] # shape (len(transitions), vocab_size)
dqn_best_action = dqn_results['best_action']
tf.logging.info('running test step on dqn_target')
dqn_target_results = self.dqn_target.run_test_steps(dqn_sess, x= b_prime._x)
q_vals_new_t = dqn_target_results['estimates'] # shape (len(transitions), vocab_size)
# we need to expand the q_estimates to match the input batch max_art_oov
q_estimates = np.concatenate([q_estimates,np.zeros((len(transitions),batch.max_art_oovs))],axis=-1)
tf.logging.info('fixing the action q-estimates')
for i, tr in enumerate(transitions):
if tr.done:
q_estimates[i][tr.action] = tr.reward
else:
q_estimates[i][tr.action] = tr.reward + FLAGS.gamma * q_vals_new_t[i][dqn_best_action[i]]
if FLAGS.dqn_scheduled_sampling:
tf.logging.info('scheduled sampling on q-estimates')
q_estimates = self.scheduled_sampling(batch_len, FLAGS.sampling_probability, b._y_extended, q_estimates)
if not FLAGS.calculate_true_q:
# when we are not training DQN based on true Q-values
# we need to update Q-values in our transitions based on this q_estimates we collected from DQN current network.
for trans, q_val in zip(transitions,q_estimates):
trans.q_values = q_val # each have the size vocab_extended
q_estimates = np.reshape(q_estimates, [FLAGS.batch_size, FLAGS.k, FLAGS.max_dec_steps, -1]) # shape (batch_size, k, max_dec_steps, vocab_size_extended)
tf.logging.info('run eval step on seq2seq model.')
t0=time.time()
results = self.model.run_eval_step(sess, batch, train_step, q_estimates)
t1=time.time()
else:
tf.logging.info('run eval step on seq2seq model.')
t0=time.time()
results = self.model.run_eval_step(sess, batch, train_step)
t1=time.time()
tf.logging.info('experiment: {}'.format(FLAGS.exp_name))
tf.logging.info('processed_batch: {}, seconds for batch: {}'.format(processed_batch, t1-t0))
printer_helper = {}
loss = printer_helper['pgen_loss']= results['pgen_loss']
if FLAGS.coverage:
printer_helper['coverage_loss'] = results['coverage_loss']
if FLAGS.rl_training or FLAGS.ac_training:
loss = printer_helper['rl_cov_total_loss']= results['reinforce_cov_total_loss']
else:
loss = printer_helper['pointer_cov_total_loss'] = results['pointer_cov_total_loss']
if FLAGS.rl_training or FLAGS.ac_training:
printer_helper['shared_loss'] = results['shared_loss']
printer_helper['rl_loss'] = results['rl_loss']
printer_helper['rl_avg_logprobs'] = results['rl_avg_logprobs']
for (k,v) in printer_helper.items():
if not np.isfinite(v):
raise Exception("{} is not finite. Stopping.".format(k))
tf.logging.info('{}: {}\t'.format(k,v))
# add summaries
summaries = results['summaries']
train_step = results['global_step']
self.summary_writer.add_summary(summaries, train_step)
# calculate running avg loss
avg_losses.append(self.calc_running_avg_loss(np.asscalar(loss), running_avg_loss, train_step))
tf.logging.info('-------------------------------------------')
running_avg_loss = np.mean(avg_losses)
tf.logging.info('==========================================')
tf.logging.info('best_loss: {}\trunning_avg_loss: {}\t'.format(best_loss, running_avg_loss))
tf.logging.info('==========================================')
# If running_avg_loss is best so far, save this checkpoint (early stopping).
# These checkpoints will appear as bestmodel-<iteration_number> in the eval dir
if best_loss is None or running_avg_loss < best_loss:
tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path)
saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best')
best_loss = running_avg_loss
# flush the summary writer every so often
if train_step % 100 == 0:
self.summary_writer.flush()
#time.sleep(600) # run eval every 10 minute
def main(self, unused_argv):
if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
raise Exception("Problem with flags: %s" % unused_argv)
FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))
# Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
flags = getattr(FLAGS,"__flags")
if not os.path.exists(FLAGS.log_root):
if FLAGS.mode=="train":
os.makedirs(FLAGS.log_root)
fw = open('{}/config.txt'.format(FLAGS.log_root),'w')
for k,v in flags.items():
fw.write('{}\t{}\n'.format(k,v))
fw.close()
else:
raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))
self.vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary
# If in decode mode, set batch_size = beam_size
# Reason: in decode mode, we decode one example at a time.
# On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
if FLAGS.mode == 'decode':
FLAGS.batch_size = FLAGS.beam_size
# If single_pass=True, check we're in decode mode
if FLAGS.single_pass and FLAGS.mode!='decode':
raise Exception("The single_pass flag should only be True in decode mode")
# Make a namedtuple hps, containing the values of the hyperparameters that the model needs
hparam_list = ['mode', 'lr', 'gpu_num',
#'sampled_greedy_flag',
'gamma', 'eta',
'fixed_eta', 'reward_function', 'intradecoder',
'use_temporal_attention', 'ac_training','rl_training', 'matrix_attention', 'calculate_true_q',
'enc_hidden_dim', 'dec_hidden_dim', 'k',
'scheduled_sampling', 'sampling_probability','fixed_sampling_probability',
'alpha', 'hard_argmax', 'greedy_scheduled_sampling',
'adagrad_init_acc', 'rand_unif_init_mag',
'trunc_norm_init_std', 'max_grad_norm',
'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps',
'dqn_scheduled_sampling', 'dqn_sleep_time', 'E2EBackProp',
'coverage', 'cov_loss_wt', 'pointer_gen']
hps_dict = {}
for key,val in flags.items(): # for each flag
if key in hparam_list: # if it's in the list
hps_dict[key] = val # add it to the dict
if FLAGS.ac_training:
hps_dict.update({'dqn_input_feature_len':(FLAGS.dec_hidden_dim)})
self.hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
# creating all the required parameters for DDQN model.
if FLAGS.ac_training:
hparam_list = ['lr', 'dqn_gpu_num',
'dqn_layers',
'dqn_replay_buffer_size',
'dqn_batch_size',
'dqn_target_update',
'dueling_net',
'dqn_polyak_averaging',
'dqn_sleep_time',
'dqn_scheduled_sampling',
'max_grad_norm']
hps_dict = {}
for key,val in flags.items(): # for each flag
if key in hparam_list: # if it's in the list
hps_dict[key] = val # add it to the dict
hps_dict.update({'dqn_input_feature_len':(FLAGS.dec_hidden_dim)})
hps_dict.update({'vocab_size':self.vocab.size()})
self.dqn_hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
# Create a batcher object that will create minibatches of data
self.batcher = Batcher(FLAGS.data_path, self.vocab, self.hps, single_pass=FLAGS.single_pass, decode_after=FLAGS.decode_after)
tf.set_random_seed(111) # a seed value for randomness
if self.hps.mode == 'train':
print("creating model...")
self.model = SummarizationModel(self.hps, self.vocab)
if FLAGS.ac_training:
# current DQN with parameters \Psi
self.dqn = DQN(self.dqn_hps,'current')
# target DQN with parameters \Psi^{\prime}
self.dqn_target = DQN(self.dqn_hps,'target')
self.setup_training()
elif self.hps.mode == 'eval':
self.model = SummarizationModel(self.hps, self.vocab)
if FLAGS.ac_training:
self.dqn = DQN(self.dqn_hps,'current')
self.dqn_target = DQN(self.dqn_hps,'target')
self.run_eval()
elif self.hps.mode == 'decode':
decode_model_hps = self.hps # This will be the hyperparameters for the decoder model
decode_model_hps = self.hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
model = SummarizationModel(decode_model_hps, self.vocab)
if FLAGS.ac_training:
# We need our target DDQN network for collecting Q-estimation at each decoder step.
dqn_target = DQN(self.dqn_hps,'target')
else:
dqn_target = None
decoder = BeamSearchDecoder(model, self.batcher, self.vocab, dqn = dqn_target)
decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
else:
raise ValueError("The 'mode' flag must be one of train/eval/decode")
# Scheduled sampling used to select either the true Q-values or the DDQN estimates
# based on https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/ScheduledEmbeddingTrainingHelper
def scheduled_sampling(self, batch_size, sampling_probability, true, estimate):
with variable_scope.variable_scope("ScheduledEmbedding"):
# Return -1s where we do not sample, and sample_ids elsewhere
select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)
select_sample = select_sampler.sample(sample_shape=batch_size)
sample_ids = array_ops.where(
select_sample,
tf.range(batch_size),
gen_array_ops.fill([batch_size], -1))
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), tf.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), tf.int32)
_estimate = array_ops.gather_nd(estimate, where_sampling)
_true = array_ops.gather_nd(true, where_not_sampling)
base_shape = array_ops.shape(true)
result1 = array_ops.scatter_nd(indices=where_sampling, updates=_estimate, shape=base_shape)
result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=_true, shape=base_shape)
result = result1 + result2
return result
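# Rough picture of what scheduled_sampling returns (illustrative, not from the original code):
# with sampling_probability p, each row i of the output is
#   estimate[i]  with probability p    (rows drawn by the Bernoulli sampler)
#   true[i]      with probability 1-p
# scatter_nd writes the two disjoint row sets back into a tensor shaped like `true`,
# so the sum of result1 and result2 is just a row-wise merge of the two sources.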
def main(unused_argv):
seq2seq = Seq2Seq()
seq2seq.main(unused_argv)
if __name__ == '__main__':
tf.app.run()
|
_server.py
|
"""
A Simple server used to show mpld3 images.
"""
import sys
import threading
import webbrowser
import socket
import itertools
import random
IPYTHON_WARNING = """
Note: if you're in the IPython notebook, mpld3.show() is not the best command
to use. Consider using mpld3.display(), or mpld3.enable_notebook().
See more information at http://mpld3.github.io/quickstart.html.
You must interrupt the kernel to end this command
"""
try:
# Python 2.x
import BaseHTTPServer as server
except ImportError:
# Python 3.x
from http import server
def generate_handler(html, files=None):
if files is None:
files = {}
class MyHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write("<html><head>"
"<title>mpld3 plot</title>"
"</head><body>\n")
self.wfile.write(html)
self.wfile.write("</body></html>")
elif self.path in files:
content_type, content = files[self.path]
self.send_response(200)
self.send_header("Content-type", content_type)
self.end_headers()
self.wfile.write(content)
else:
self.send_error(404)
return MyHandler
def find_open_port(ip, port, n=50):
"""Find an open port near the specified port"""
ports = itertools.chain((port + i for i in range(n)),
(port + random.randint(-2 * n, 2 * n) for i in range(n)))
for port in ports:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((ip, port))
s.close()
if result != 0:
return port
raise ValueError("no open ports found")
def serve_and_open(html, ip='127.0.0.1', port=8888, n_retries=50, files=None,
ipython_warning=True):
"""Start a server serving the given HTML, and open a browser
Parameters
----------
html : string
HTML to serve
ip : string (default = '127.0.0.1')
ip address at which the HTML will be served.
port : int (default = 8888)
the port at which to serve the HTML
n_retries : int (default = 50)
the number of nearby ports to search if the specified port is in use.
files : dictionary (optional)
dictionary of extra content to serve
ipython_warning : bool (optional)
if True (default), then print a warning if this is used within IPython
"""
port = find_open_port(ip, port, n_retries)
Handler = generate_handler(html, files)
srvr = server.HTTPServer((ip, port), Handler)
if ipython_warning:
try:
__IPYTHON__
except NameError:
pass
else:
print(IPYTHON_WARNING)
# Start the server
print("Serving to http://{0}:{1}/ [Ctrl-C to exit]".format(ip, port))
sys.stdout.flush()
# Use a thread to open a web browser pointing to the server
b = lambda: webbrowser.open('http://{0}:{1}'.format(ip, port))
threading.Thread(target=b).start()
try:
srvr.serve_forever()
except (KeyboardInterrupt, SystemExit):
print("\nstopping Server...")
srvr.server_close()
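# Minimal usage sketch (illustrative; the HTML string here is made up):
#   html = "<h1>hello mpld3</h1>"
#   serve_and_open(html, ip='127.0.0.1', port=8888)
# This blocks, serving http://127.0.0.1:8888/ (or a nearby free port) until Ctrl-C.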
|
__init__.py
|
# -*- coding: utf-8 -*-
# Three possible modes:
# 'cli': running from "wandb" command
# 'run': we're a script launched by "wandb run"
# 'dryrun': we're a script not launched by "wandb run"
from __future__ import absolute_import, print_function
__author__ = """Chris Van Pelt"""
__email__ = 'vanpelt@wandb.com'
__version__ = '0.8.18'
import atexit
import click
import io
import json
import logging
import time
import os
import contextlib
import signal
import six
import getpass
import socket
import subprocess
import sys
import traceback
import tempfile
import re
import glob
import threading
import platform
import collections
from six.moves import queue
from six import string_types
from importlib import import_module
from . import env
from . import io_wrap
from .core import *
# These imports need to be below "from .core import *" until we remove
# 'from wandb import __stage_dir__' from api.py etc.
from wandb.apis import InternalApi, PublicApi, CommError
from wandb import wandb_types as types
from wandb import wandb_config
from wandb import wandb_run
from wandb import wandb_socket
from wandb import streaming_log
from wandb import util
from wandb.run_manager import LaunchError, Process
from wandb.data_types import Image
from wandb.data_types import Video
from wandb.data_types import Audio
from wandb.data_types import Table
from wandb.data_types import Html
from wandb.data_types import Object3D
from wandb.data_types import Histogram
from wandb.data_types import Graph
from wandb import trigger
from wandb.dataframes import image_categorizer_dataframe
from wandb.dataframes import image_segmentation_dataframe
from wandb.dataframes import image_segmentation_binary_dataframe
from wandb.dataframes import image_segmentation_multiclass_dataframe
from wandb import wandb_torch
from wandb.wandb_controller import controller
from wandb.wandb_agent import agent
from wandb.wandb_controller import sweep
from wandb.compat import windows
logger = logging.getLogger(__name__)
# Internal variables
_shutdown_async_log_thread_wait_time = 20
# this global W&B debug log gets re-written by every W&B process
if __stage_dir__ is not None:
GLOBAL_LOG_FNAME = os.path.abspath(os.path.join(wandb_dir(), 'debug.log'))
else:
GLOBAL_LOG_FNAME = os.path.join(tempfile.gettempdir(), 'wandb-debug.log')
def _debugger(*args):
import pdb
pdb.set_trace()
class Callbacks():
@property
def Keras(self):
termlog(
"DEPRECATED: wandb.callbacks is deprecated, use `from wandb.keras import WandbCallback`")
from wandb.keras import WandbCallback
return WandbCallback
callbacks = Callbacks()
def hook_torch(*args, **kwargs):
termlog(
"DEPRECATED: wandb.hook_torch is deprecated, use `wandb.watch`")
return watch(*args, **kwargs)
_global_watch_idx = 0
def watch(models, criterion=None, log="gradients", log_freq=100, idx=None):
"""
Hooks into the torch model to collect gradients and the topology. Should be extended
to accept arbitrary ML models.
:param (torch.Module) models: The model to hook, can be a tuple
:param (torch.F) criterion: An optional loss value being optimized
:param (str) log: One of "gradients", "parameters", "all", or None
:param (int) log_freq: log gradients and parameters every N batches
:param (int) idx: an index to be used when calling wandb.watch on multiple models
:return: (wandb.Graph) The graph object that will populate after the first backward pass
"""
global _global_watch_idx
if run is None:
raise ValueError(
"You must call `wandb.init` before calling watch")
log_parameters = False
log_gradients = True
if log == "all":
log_parameters = True
elif log == "parameters":
log_parameters = True
log_gradients = False
elif log is None:
log_gradients = False
if not isinstance(models, (tuple, list)):
models = (models,)
graphs = []
prefix = ''
if idx is None:
idx = _global_watch_idx
for local_idx, model in enumerate(models):
global_idx = idx + local_idx
_global_watch_idx += 1
if global_idx > 0:
# TODO: this makes ugly chart names like gradients/graph_1conv1d.bias
prefix = "graph_%i" % global_idx
run.history.torch.add_log_hooks_to_pytorch_module(
model, log_parameters=log_parameters, log_gradients=log_gradients, prefix=prefix, log_freq=log_freq)
graph = wandb_torch.TorchGraph.hook_torch(
model, criterion, graph_idx=global_idx)
graphs.append(graph)
# NOTE: the graph is set in run.summary by hook_torch on the backward pass
return graphs
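# Hypothetical usage (the model name is a placeholder, not from this file):
#   model = torchvision.models.resnet18()
#   wandb.watch(model, log="all", log_freq=50)
# logs both gradients and parameters every 50 batches once training starts.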
def unwatch(models=None):
"""Remove pytorch gradient and parameter hooks.
Args:
models (list): Optional list of pytorch models that have had watch called on them
"""
if models:
if not isinstance(models, (tuple, list)):
models = (models,)
for model in models:
if not hasattr(model, "_wandb_hook_names"):
termwarn("%s model has not been watched" % model)
else:
for name in model._wandb_hook_names:
run.history.torch.unhook(name)
else:
run.history.torch.unhook_all()
class ExitHooks(object):
def __init__(self):
self.exit_code = 0
self.exception = None
def hook(self):
self._orig_exit = sys.exit
sys.exit = self.exit
sys.excepthook = self.exc_handler
def exit(self, code=0):
orig_code = code
if code is None:
code = 0
elif not isinstance(code, int):
code = 1
self.exit_code = code
self._orig_exit(orig_code)
def was_ctrl_c(self):
return isinstance(self.exception, KeyboardInterrupt)
def exc_handler(self, exc_type, exc, *tb):
self.exit_code = 1
self.exception = exc
if issubclass(exc_type, Error):
termerror(str(exc))
if self.was_ctrl_c():
self.exit_code = 255
traceback.print_exception(exc_type, exc, *tb)
def _init_headless(run, cloud=True):
global join
global _user_process_finished_called
environ = dict(os.environ)
run.set_environment(environ)
server = wandb_socket.Server()
run.socket = server
hooks = ExitHooks()
hooks.hook()
if platform.system() == "Windows":
try:
import win32api
# Make sure we are not ignoring CTRL_C_EVENT
# https://docs.microsoft.com/en-us/windows/console/setconsolectrlhandler
# https://stackoverflow.com/questions/1364173/stopping-python-using-ctrlc
win32api.SetConsoleCtrlHandler(None, False)
except ImportError:
termerror("Install the win32api library with `pip install pypiwin32`")
# PTYs don't work in windows so we create these unused pipes and
# mirror stdout to run.dir/output.log. There should be a way to make
# pipes work, but I haven't figured it out. See links in compat/windows
stdout_master_fd, stdout_slave_fd = os.pipe()
stderr_master_fd, stderr_slave_fd = os.pipe()
else:
stdout_master_fd, stdout_slave_fd = io_wrap.wandb_pty(resize=False)
stderr_master_fd, stderr_slave_fd = io_wrap.wandb_pty(resize=False)
headless_args = {
'command': 'headless',
'pid': os.getpid(),
'stdout_master_fd': stdout_master_fd,
'stderr_master_fd': stderr_master_fd,
'cloud': cloud,
'port': server.port
}
internal_cli_path = os.path.join(
os.path.dirname(__file__), 'internal_cli.py')
if six.PY2 or platform.system() == "Windows":
# TODO(adrian): close_fds=False is bad for security. we set
# it so we can pass the PTY FDs to the wandb process. We
# should use subprocess32, which has pass_fds.
popen_kwargs = {'close_fds': False}
else:
popen_kwargs = {'pass_fds': [stdout_master_fd, stderr_master_fd]}
# TODO(adrian): ensure we use *exactly* the same python interpreter
# TODO(adrian): make wandb the foreground process so we don't give
# up terminal control until syncing is finished.
# https://stackoverflow.com/questions/30476971/is-the-child-process-in-foreground-or-background-on-fork-in-c
wandb_process = subprocess.Popen([sys.executable, internal_cli_path, json.dumps(
headless_args)], env=environ, **popen_kwargs)
termlog('Tracking run with wandb version {}'.format(
__version__))
os.close(stdout_master_fd)
os.close(stderr_master_fd)
# Listen on the socket waiting for the wandb process to be ready
try:
success, _ = server.listen(30)
except KeyboardInterrupt:
success = False
else:
if not success:
termerror('W&B process (PID {}) did not respond'.format(
wandb_process.pid))
if not success:
wandb_process.kill()
for _ in range(20):
time.sleep(0.1)
if wandb_process.poll() is not None:
break
if wandb_process.poll() is None:
termerror('Failed to kill wandb process, PID {}'.format(
wandb_process.pid))
# TODO attempt to upload a debug log
path = GLOBAL_LOG_FNAME.replace(os.getcwd()+os.sep, "")
raise LaunchError(
"W&B process failed to launch, see: {}".format(path))
if platform.system() == "Windows":
output = open(os.path.join(run.dir, "output.log"), "wb")
stdout_redirector = io_wrap.WindowsRedirector(sys.stdout, output)
stderr_redirector = io_wrap.WindowsRedirector(sys.stderr, output)
else:
stdout_slave = os.fdopen(stdout_slave_fd, 'wb')
stderr_slave = os.fdopen(stderr_slave_fd, 'wb')
try:
stdout_redirector = io_wrap.FileRedirector(sys.stdout, stdout_slave)
stderr_redirector = io_wrap.FileRedirector(sys.stderr, stderr_slave)
except ValueError:
# stdout / err aren't files
output = open(os.path.join(run.dir, "output.log"), "wb")
stdout_redirector = io_wrap.WindowsRedirector(sys.stdout, output)
stderr_redirector = io_wrap.WindowsRedirector(sys.stderr, output)
# TODO(adrian): we should register this right after starting the wandb process to
# make sure we shut down the W&B process eg. if there's an exception in the code
# above
atexit.register(_user_process_finished, server, hooks,
wandb_process, stdout_redirector, stderr_redirector)
def _wandb_join(exit_code=None):
global _global_run_stack
shutdown_async_log_thread()
run.close_files()
if exit_code is not None:
hooks.exit_code = exit_code
_user_process_finished(server, hooks,
wandb_process, stdout_redirector, stderr_redirector)
if len(_global_run_stack) > 0:
_global_run_stack.pop()
join = _wandb_join
_user_process_finished_called = False
# redirect output last of all so we don't miss out on error messages
stdout_redirector.redirect()
if not env.is_debug():
stderr_redirector.redirect()
def load_ipython_extension(ipython):
pass
def login(anonymous=None, key=None):
"""Ensure this machine is logged in
You can manually specify a key, but this method is intended to prompt for user input.
anonymous can be "never", "must", or "allow". If set to "must" we'll always login anonymously,
if set to "allow" we'll only create an anonymous user if the user isn't already logged in.
Returns:
True if login was successful
False on failure
"""
# This ensures we have a global api object
ensure_configured()
if anonymous:
os.environ[env.ANONYMOUS] = anonymous
anonymous = anonymous or "never"
in_jupyter = _get_python_type() != "python"
if key:
termwarn("If you're specifying your api key in code, ensure this code is not shared publically.\nConsider setting the WANDB_API_KEY environment variable, or running `wandb login` from the command line.")
if in_jupyter:
termwarn("Calling wandb.login() without arguments from jupyter should prompt you for an api key.")
util.set_api_key(api, key)
elif api.api_key and anonymous != "must":
key = api.api_key
elif in_jupyter:
os.environ[env.JUPYTER] = "true"
# Don't return key to ensure it's not displayed in the notebook.
key = _jupyter_login(api=api)
else:
key = util.prompt_api_key(api)
return True if key else False
def _jupyter_login(force=True, api=None):
"""Attempt to login from a jupyter environment
If force=False, we'll only attempt to auto-login, otherwise we'll prompt the user
"""
def get_api_key_from_browser(signup=False):
key, anonymous = None, False
if 'google.colab' in sys.modules:
key = jupyter.attempt_colab_login(api.app_url)
elif 'databricks_cli' in sys.modules and 'dbutils' in sys.modules:
# Databricks does not seem to support getpass() so we need to fail
# early and prompt the user to configure the key manually for now.
termerror(
"Databricks requires api_key to be configured manually, instructions at: http://docs.wandb.com/integrations/databricks")
raise LaunchError("Databricks integration requires api_key to be configured.")
# For jupyter we default to not allowing anonymous
if not key and os.environ.get(env.ANONYMOUS, "never") != "never":
key = api.create_anonymous_api_key()
anonymous = True
if not key and force:
try:
termerror("Not authenticated. Copy a key from https://app.wandb.ai/authorize")
key = getpass.getpass("API Key: ").strip()
except NotImplementedError:
termerror(
"Can't accept input in this environment, you should set WANDB_API_KEY or call wandb.login(key='YOUR_API_KEY')")
return key, anonymous
api = api or (run.api if run else None)
if not api:
raise LaunchError("Internal error: api required for jupyter login")
return util.prompt_api_key(api, browser_callback=get_api_key_from_browser)
def _init_jupyter(run):
"""Asks for user input to configure the machine if it isn't already and creates a new run.
Log pushing and system stats don't start until `wandb.log()` is first called.
"""
from wandb import jupyter
from IPython.core.display import display, HTML
# TODO: Should we log to jupyter?
# global logging had to be disabled because it set the level to debug
# I also disabled run logging because we're rarely using it.
# try_to_set_up_global_logging()
# run.enable_logging()
os.environ[env.JUPYTER] = "true"
if not run.api.api_key:
# Fetches or prompts the user for an API key, or, if anonymous mode is enabled, uses an anonymous API key
key = _jupyter_login()
# Ensure our api client picks up the new key
if key:
run.api.reauth()
else:
run.mode = "dryrun"
display(HTML('''
<b>Could not authenticate.</b><br/>
'''))
run.resume = "allow"
if run.mode == "dryrun":
display(HTML('''
Using <a href="https://wandb.com" target="_blank">Weights & Biases</a> in dryrun mode. Not logging results to the cloud.<br/>
Call wandb.login() to authenticate this machine.<br/>
'''.format(run.api.app_url)))
else:
displayed = False
try:
sweep_url = run.get_sweep_url()
sweep_line = 'Sweep page: <a href="{}" target="_blank">{}</a><br/>\n'.format(
sweep_url, sweep_url) if sweep_url else ""
docs_html = '<a href="https://docs.wandb.com/integrations/jupyter.html" target="_blank">(Documentation)</a>'
display(HTML('''
Logging results to <a href="https://wandb.com" target="_blank">Weights & Biases</a> {}.<br/>
Project page: <a href="{}" target="_blank">{}</a><br/>
{}Run page: <a href="{}" target="_blank">{}</a><br/>
'''.format(docs_html, run.get_project_url(), run.get_project_url(), sweep_line, run.get_url(), run.get_url() )))
displayed = True
run.save()
except (CommError, ValueError) as e:
if not displayed:
display(HTML('''
Logging results to <a href="https://wandb.com" target="_blank">Weights & Biases</a>.<br/>
Couldn't load entity due to error: {}
'''.format(e.message)))
else:
termerror(str(e))
run.set_environment()
run._init_jupyter_agent()
ipython = get_ipython()
ipython.register_magics(jupyter.WandBMagics)
def reset_start():
"""Reset START_TIME to when the cell starts"""
global START_TIME
START_TIME = time.time()
ipython.events.register("pre_run_cell", reset_start)
def cleanup():
# shutdown async logger because _user_process_finished isn't called in jupyter
shutdown_async_log_thread()
run._stop_jupyter_agent()
ipython.events.register('post_run_cell', cleanup)
_user_process_finished_called = False
def _user_process_finished(server, hooks, wandb_process, stdout_redirector, stderr_redirector):
global _user_process_finished_called
if _user_process_finished_called:
return
_user_process_finished_called = True
trigger.call('on_finished')
stdout_redirector.restore()
if not env.is_debug():
stderr_redirector.restore()
termlog()
termlog("Waiting for W&B process to finish, PID {}".format(wandb_process.pid))
server.done(hooks.exit_code)
try:
while wandb_process.poll() is None:
time.sleep(0.1)
except KeyboardInterrupt:
termlog('Sending ctrl-c to W&B process, PID {}. Press ctrl-c again to kill it.'.format(wandb_process.pid))
try:
while wandb_process.poll() is None:
time.sleep(0.1)
except KeyboardInterrupt:
if wandb_process.poll() is None:
termlog('Killing W&B process, PID {}'.format(wandb_process.pid))
wandb_process.kill()
# Will be set to the run object for the current run, as returned by
# wandb.init(). We may want to get rid of this, but WandbCallback
# relies on it, and it improves the API a bit (user doesn't have to
# pass the run into WandbCallback). run is None instead of a PreInitObject
# because many places in the code check this.
run = None
config = util.PreInitObject("wandb.config") # config object shared with the global run
summary = util.PreInitObject("wandb.summary") # summary object shared with the global run
Api = PublicApi
# Stores what modules have been patched
patched = {
"tensorboard": [],
"keras": [],
"gym": []
}
_saved_files = set()
_global_run_stack = []
def join(exit_code=None):
"""Marks a run as finished"""
shutdown_async_log_thread()
if run:
run.close_files()
if len(_global_run_stack) > 0:
_global_run_stack.pop()
def save(glob_str, base_path=None, policy="live"):
""" Ensure all files matching *glob_str* are synced to wandb with the policy specified.
base_path: the base path to run the glob relative to
policy:
live: upload the file as it changes, overwriting the previous version
end: only upload file when the run ends
"""
global _saved_files
if run is None:
raise ValueError(
"You must call `wandb.init` before calling save")
if policy not in ("live", "end"):
raise ValueError(
'Only "live" and "end" policies are currently supported.')
if isinstance(glob_str, bytes):
glob_str = glob_str.decode('utf-8')
if not isinstance(glob_str, string_types):
raise ValueError("Must call wandb.save(glob_str) with glob_str a str")
if base_path is None:
base_path = os.path.dirname(glob_str)
wandb_glob_str = os.path.relpath(glob_str, base_path)
if "../" in wandb_glob_str:
raise ValueError(
"globs can't walk above base_path")
if (glob_str, base_path, policy) in _saved_files:
return []
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str)
return []
run.send_message(
{"save_policy": {"glob": wandb_glob_str, "policy": policy}})
files = []
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(run.dir, file_name)
util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite existing symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
_saved_files.add((glob_str, base_path, policy))
return files
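# Hypothetical usage (paths are placeholders): symlink everything matching the glob into
# run.dir and upload it with the chosen policy, e.g.
#   wandb.save("checkpoints/*.pt", policy="end")   # upload only when the run finishes
#   wandb.save("model-latest.h5")                  # default "live": re-upload whenever it changes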
def restore(name, run_path=None, replace=False, root=None):
""" Downloads the specified file from cloud storage into the current run directory
if it doesn't exist.
name: the name of the file
run_path: optional path to a different run to pull files from
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
returns None if it can't find the file, otherwise a file object open for reading
raises wandb.CommError if it can't find the run
"""
if run_path is None and run is None:
raise ValueError(
"You must call `wandb.init` before calling restore or specify a run_path")
api = Api()
api_run = api.run(run_path or run.path)
root = root or run.dir if run else "."
path = os.path.join(root, name)
if os.path.exists(path) and not replace:
return open(path, "r")
files = api_run.files([name])
if len(files) == 0:
return None
return files[0].download(root=root, replace=True)
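# Hypothetical usage (the run path below is a placeholder): pull a file logged by another run
# into the current run directory and get a readable file object back:
#   weights_file = wandb.restore("model-best.h5", run_path="my-entity/my-project/run-id")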
_tunnel_process = None
def tunnel(host, port):
"""Simple helper to open a tunnel. Returns a public HTTPS url or None"""
global _tunnel_process
if _tunnel_process:
_tunnel_process.kill()
_tunnel_process = None
process = subprocess.Popen("ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 -R 80:{}:{} serveo.net".format(
host, port), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while process.returncode is None:
for line in process.stdout:
match = re.match(r".+(https.+)$", line.decode("utf-8").strip())
if match:
_tunnel_process = process
return match.group(1)
# set returncode if the process has exited
process.poll()
time.sleep(1)
return None
def monitor(options={}):
"""Starts syncing with W&B if you're in Jupyter. Displays your W&B charts live in a Jupyter notebook.
It's currently a context manager for legacy reasons.
"""
try:
from IPython.display import display
except ImportError:
def display(stuff): return None
class Monitor():
def __init__(self, options={}):
if os.getenv(env.JUPYTER):
display(jupyter.Run())
else:
self.rm = False
termerror(
"wandb.monitor is only functional in Jupyter notebooks")
def __enter__(self):
termlog(
"DEPRECATED: with wandb.monitor(): is deprecated, add %%wandb to the beginning of a cell to see live results.")
pass
def __exit__(self, *args):
pass
return Monitor(options)
_async_log_queue = queue.Queue()
_async_log_thread_shutdown_event = threading.Event()
_async_log_thread_complete_event = threading.Event()
_async_log_thread = None
def _async_log_thread_target():
"""Consumes async logs from our _async_log_queue and actually logs them"""
global _async_log_thread
shutdown_requested = False
while not shutdown_requested:
try:
kwargs = _async_log_queue.get(block=True, timeout=1)
log(**kwargs)
except queue.Empty:
shutdown_requested = _async_log_thread_shutdown_event.wait(1) and _async_log_queue.empty()
_async_log_thread_complete_event.set()
_async_log_thread = None
def _ensure_async_log_thread_started():
"""Ensures our log consuming thread is started"""
global _async_log_thread, _async_log_thread_shutdown_event, _async_log_thread_complete_event
if _async_log_thread is None:
_async_log_thread_shutdown_event = threading.Event()
_async_log_thread_complete_event = threading.Event()
_async_log_thread = threading.Thread(target=_async_log_thread_target)
_async_log_thread.daemon = True
_async_log_thread.start()
def shutdown_async_log_thread():
"""Shuts down our async logging thread"""
if _async_log_thread:
_async_log_thread_shutdown_event.set()
res = _async_log_thread_complete_event.wait(_shutdown_async_log_thread_wait_time) # TODO: possible race here
if res is False:
termwarn('async log queue not empty after %d seconds, some log statements will be dropped' % (
_shutdown_async_log_thread_wait_time))
# FIXME: it is worse than this, likely the program will crash because files will be closed
# FIXME: py 2.7 will return None here so we don't know if we dropped data
def log(row=None, commit=True, step=None, sync=True, *args, **kwargs):
"""Log a dict to the global run's history.
wandb.log({'train-loss': 0.5, 'accuracy': 0.9})
Args:
row (dict, optional): A dict of serializable python objects, i.e. str keys mapping to ints, floats, Tensors, dicts, or wandb.data_types
commit (boolean, optional): Persist a set of metrics, if false just update the existing dict
step (integer, optional): The global step in processing. This sets commit=True any time step increases
sync (boolean, True): If set to False, process calls to log in a separate thread
"""
if run is None:
raise ValueError(
"You must call `wandb.init` in the same process before calling log")
run.log(row, commit, step, sync, *args, **kwargs)
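# Minimal usage sketch (values are illustrative):
#   wandb.log({'train-loss': 0.5, 'accuracy': 0.9})   # commit a new history row
#   wandb.log({'val-loss': 0.7}, commit=False)        # accumulate into the current row
#   wandb.log({'train-loss': 0.4}, step=epoch)        # explicit global step; `epoch` is a placeholder int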
def ensure_configured():
global GLOBAL_LOG_FNAME, api
# We re-initialize here for tests
api = InternalApi()
GLOBAL_LOG_FNAME = os.path.abspath(os.path.join(wandb_dir(), 'debug.log'))
def uninit(only_patches=False):
"""Undo the effects of init(). Useful for testing.
"""
global run, config, summary, patched, _saved_files
if not only_patches:
run = None
config = util.PreInitObject("wandb.config")
summary = util.PreInitObject("wandb.summary")
_saved_files = set()
# UNDO patches
for mod in patched["tensorboard"]:
module = import_module(mod[0])
parts = mod[1].split(".")
if len(parts) > 1:
module = getattr(module, parts[0])
mod[1] = parts[1]
setattr(module, mod[1], getattr(module, "orig_"+mod[1]))
patched["tensorboard"] = []
def reset_env(exclude=[]):
"""Remove environment variables, used in Jupyter notebooks"""
if os.getenv(env.INITED):
wandb_keys = [key for key in os.environ.keys() if key.startswith(
'WANDB_') and key not in exclude]
for key in wandb_keys:
del os.environ[key]
return True
else:
return False
def try_to_set_up_global_logging():
"""Try to set up global W&B debug log that gets re-written by every W&B process.
It may fail (and return False) eg. if the current directory isn't user-writable
"""
root = logging.getLogger()
root.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d [%(filename)s:%(funcName)s():%(lineno)s] %(message)s')
if env.is_debug():
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
root.addHandler(handler)
try:
handler = logging.FileHandler(GLOBAL_LOG_FNAME, mode='w')
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
root.addHandler(handler)
except IOError as e: # eg. in case wandb directory isn't writable
termerror('Failed to set up logging: {}'.format(e))
return False
return True
def _get_python_type():
try:
if 'terminal' in get_ipython().__module__:
return 'ipython'
else:
return 'jupyter'
except (NameError, AttributeError):
return "python"
def sagemaker_auth(overrides={}, path="."):
""" Write a secrets.env file with the W&B ApiKey and any additional secrets passed.
Args:
overrides (dict, optional): Additional environment variables to write to secrets.env
path (str, optional): The path to write the secrets file.
"""
api_key = overrides.get(env.API_KEY, Api().api_key)
if api_key is None:
raise ValueError(
"Can't find W&B ApiKey, set the WANDB_API_KEY env variable or run `wandb login`")
overrides[env.API_KEY] = api_key
with open(os.path.join(path, "secrets.env"), "w") as file:
for k, v in six.iteritems(overrides):
file.write("{}={}\n".format(k, v))
def init(job_type=None, dir=None, config=None, project=None, entity=None, reinit=None, tags=None,
group=None, allow_val_change=False, resume=False, force=False, tensorboard=False,
sync_tensorboard=False, monitor_gym=False, name=None, notes=None, id=None, magic=None,
anonymous=None):
"""Initialize W&B
If called from within Jupyter, initializes a new run and waits for a call to
`wandb.log` to begin pushing metrics. Otherwise, spawns a new process
to communicate with W&B.
Args:
job_type (str, optional): The type of job running, defaults to 'train'
config (dict, argparse, or tf.FLAGS, optional): The hyperparameters to store with the run
project (str, optional): The project to push metrics to
entity (str, optional): The entity to push metrics to
dir (str, optional): An absolute path to a directory where metadata will be stored
group (str, optional): A unique string shared by all runs in a given group
tags (list, optional): A list of tags to apply to the run
id (str, optional): A globally unique (per project) identifier for the run
name (str, optional): A display name which does not have to be unique
notes (str, optional): A multiline string associated with the run
reinit (bool, optional): Allow multiple calls to init in the same process
resume (bool, str, optional): Automatically resume this run if run from the same machine,
you can also pass a unique run_id
sync_tensorboard (bool, optional): Synchronize wandb logs to tensorboard or tensorboardX
force (bool, optional): Force authentication with wandb, defaults to False
magic (bool, dict, or str, optional): magic configuration as bool, dict, json string,
yaml filename
anonymous (str, optional): Can be "allow", "must", or "never". Controls whether anonymous logging is allowed.
Defaults to never.
Returns:
A wandb.run object for metric and config logging.
"""
init_args = locals()
trigger.call('on_init', **init_args)
global run
global __stage_dir__
global _global_watch_idx
# We allow re-initialization when we're in Jupyter or explicitly opt in to it.
in_jupyter = _get_python_type() != "python"
if reinit or (in_jupyter and reinit != False):
# Reset global state for pytorch watch and tensorboard
_global_watch_idx = 0
if len(patched["tensorboard"]) > 0:
util.get_module("wandb.tensorboard").reset_state()
reset_env(exclude=env.immutable_keys())
if len(_global_run_stack) > 0:
if len(_global_run_stack) > 1:
termwarn("If you want to track multiple runs concurrently in wandb you should use multi-processing not threads")
join()
run = None
# TODO: deprecate tensorboard
    if (tensorboard or sync_tensorboard) and len(patched["tensorboard"]) == 0:
util.get_module("wandb.tensorboard").patch()
if monitor_gym and len(patched["gym"]) == 0:
util.get_module("wandb.gym").monitor()
sagemaker_config = util.parse_sm_config()
tf_config = util.parse_tfjob_config()
    if group is None:
        group = os.getenv(env.RUN_GROUP)
    if job_type is None:
        job_type = os.getenv(env.JOB_TYPE)
if sagemaker_config:
# Set run_id and potentially grouping if we're in SageMaker
run_id = os.getenv('TRAINING_JOB_NAME')
if run_id:
os.environ[env.RUN_ID] = '-'.join([
run_id,
os.getenv('CURRENT_HOST', socket.gethostname())])
conf = json.load(
open("/opt/ml/input/config/resourceconfig.json"))
        if group is None and len(conf["hosts"]) > 1:
group = os.getenv('TRAINING_JOB_NAME')
# Set secret variables
if os.path.exists("secrets.env"):
for line in open("secrets.env", "r"):
key, val = line.strip().split('=', 1)
os.environ[key] = val
elif tf_config:
cluster = tf_config.get('cluster')
job_name = tf_config.get('task', {}).get('type')
task_index = tf_config.get('task', {}).get('index')
if job_name is not None and task_index is not None:
# TODO: set run_id for resuming?
run_id = cluster[job_name][task_index].rsplit(":")[0]
            if job_type is None:
                job_type = job_name
            if group is None and len(cluster.get("worker", [])) > 0:
group = cluster[job_name][0].rsplit("-"+job_name, 1)[0]
image = util.image_id_from_k8s()
if image:
os.environ[env.DOCKER] = image
if project:
os.environ[env.PROJECT] = project
if entity:
os.environ[env.ENTITY] = entity
if group:
os.environ[env.RUN_GROUP] = group
if job_type:
os.environ[env.JOB_TYPE] = job_type
if tags:
if isinstance(tags, str):
# People sometimes pass a string instead of an array of strings...
tags = [tags]
os.environ[env.TAGS] = ",".join(tags)
if id:
os.environ[env.RUN_ID] = id
    if name is None and resume != "must":
# We do this because of https://github.com/wandb/core/issues/2170
# to ensure that the run's name is explicitly set to match its
# id. If we don't do this and the id is eight characters long, the
# backend will set the name to a generated human-friendly value.
#
# In any case, if the user is explicitly setting `id` but not
# `name`, their id is probably a meaningful string that we can
# use to label the run.
#
# In the resume="must" case, we know we are resuming, so we should
# make sure to not set the name because it would have been set with
# the original run.
#
# TODO: handle "auto" resume by moving this logic later when we know
# if there is a resume.
name = os.environ.get(env.NAME, id) # environment variable takes precedence over this.
if name:
os.environ[env.NAME] = name
if notes:
os.environ[env.NOTES] = notes
if magic is not None and magic is not False:
if isinstance(magic, dict):
os.environ[env.MAGIC] = json.dumps(magic)
elif isinstance(magic, str):
os.environ[env.MAGIC] = magic
elif isinstance(magic, bool):
pass
else:
termwarn("wandb.init called with invalid magic parameter type", repeat=False)
from wandb import magic_impl
magic_impl.magic_install(init_args=init_args)
if dir:
os.environ[env.DIR] = dir
util.mkdir_exists_ok(wandb_dir())
if anonymous is not None:
os.environ[env.ANONYMOUS] = anonymous
if os.environ.get(env.ANONYMOUS, "never") not in ["allow", "must", "never"]:
raise LaunchError("anonymous must be set to 'allow', 'must', or 'never'")
resume_path = os.path.join(wandb_dir(), wandb_run.RESUME_FNAME)
if resume == True:
os.environ[env.RESUME] = "auto"
elif resume in ("allow", "must", "never"):
os.environ[env.RESUME] = resume
if id:
os.environ[env.RUN_ID] = id
elif resume:
os.environ[env.RESUME] = os.environ.get(env.RESUME, "allow")
# TODO: remove allowing resume as a string in the future
os.environ[env.RUN_ID] = id or resume
elif os.path.exists(resume_path):
os.remove(resume_path)
if os.environ.get(env.RESUME) == 'auto' and os.path.exists(resume_path):
if not os.environ.get(env.RUN_ID):
os.environ[env.RUN_ID] = json.load(open(resume_path))["run_id"]
# the following line is useful to ensure that no W&B logging happens in the user
# process that might interfere with what they do
# logging.basicConfig(format='user process %(asctime)s - %(name)s - %(levelname)s - %(message)s')
# If a thread calls wandb.init() it will get the same Run object as
# the parent. If a child process with distinct memory space calls
# wandb.init(), it won't get an error, but it will get a result of
# None.
# This check ensures that a child process can safely call wandb.init()
# after a parent has (only the parent will create the Run object).
# This doesn't protect against the case where the parent doesn't call
# wandb.init but two children do.
if run or os.getenv(env.INITED):
return run
if __stage_dir__ is None:
__stage_dir__ = "wandb"
util.mkdir_exists_ok(wandb_dir())
try:
signal.signal(signal.SIGQUIT, _debugger)
except AttributeError:
pass
try:
run = wandb_run.Run.from_environment_or_defaults()
_global_run_stack.append(run)
except IOError as e:
termerror('Failed to create run directory: {}'.format(e))
raise LaunchError("Could not write to filesystem.")
run.set_environment()
def set_global_config(run):
global config # because we already have a local config
config = run.config
set_global_config(run)
global summary
summary = run.summary
# set this immediately after setting the run and the config. if there is an
# exception after this it'll probably break the user script anyway
os.environ[env.INITED] = '1'
if in_jupyter:
_init_jupyter(run)
elif run.mode == 'clirun':
pass
elif run.mode == 'run':
api = InternalApi()
# let init_jupyter handle this itself
if not in_jupyter and not api.api_key:
termlog(
"W&B is a tool that helps track and visualize machine learning experiments")
if force:
termerror(
"No credentials found. Run \"wandb login\" or \"wandb off\" to disable wandb")
else:
if util.prompt_api_key(api):
_init_headless(run)
else:
termlog(
"No credentials found. Run \"wandb login\" to visualize your metrics")
run.mode = "dryrun"
_init_headless(run, False)
else:
_init_headless(run)
elif run.mode == 'dryrun':
termlog(
'Dry run mode, not syncing to the cloud.')
_init_headless(run, False)
else:
termerror(
'Invalid run mode "%s". Please unset WANDB_MODE.' % run.mode)
raise LaunchError("The WANDB_MODE environment variable is invalid.")
# set the run directory in the config so it actually gets persisted
run.config.set_run_dir(run.dir)
# we have re-read the config, add telemetry data
telemetry_updated = run.config._telemetry_update()
if sagemaker_config:
run.config._update(sagemaker_config)
allow_val_change = True
if config or telemetry_updated:
run.config._update(config, allow_val_change=allow_val_change, as_defaults=not allow_val_change)
# Access history to ensure resumed is set when resuming
run.history
# Load the summary to support resuming
run.summary.load()
return run
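# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates a typical call covered by the init() docstring above: project,
# grouping, tags, a config dict and resumption. All names and values are
# placeholders.
def _example_init_usage():  # pragma: no cover - illustrative only
    example_run = init(project="my-project", job_type="train",
                       group="experiment-1", tags=["baseline"],
                       config={"lr": 1e-3, "batch_size": 32},
                       resume="allow")
    log({"loss": 0.0})  # metrics are then pushed with wandb.log
    return example_run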
tensorflow = util.LazyLoader('tensorflow', globals(), 'wandb.tensorflow')
tensorboard = util.LazyLoader('tensorboard', globals(), 'wandb.tensorboard')
jupyter = util.LazyLoader('jupyter', globals(), 'wandb.jupyter')
keras = util.LazyLoader('keras', globals(), 'wandb.keras')
fastai = util.LazyLoader('fastai', globals(), 'wandb.fastai')
docker = util.LazyLoader('docker', globals(), 'wandb.docker')
xgboost = util.LazyLoader('xgboost', globals(), 'wandb.xgboost')
gym = util.LazyLoader('gym', globals(), 'wandb.gym')
ray = util.LazyLoader('ray', globals(), 'wandb.ray')
__all__ = ['init', 'config', 'summary', 'join', 'login', 'log', 'save', 'restore',
'tensorflow', 'watch', 'types', 'tensorboard', 'jupyter', 'keras', 'fastai',
'docker', 'xgboost', 'gym', 'ray', 'run', 'join', 'Image', 'Video',
'Audio', 'Table', 'Html', 'Object3D', 'Histogram', 'Graph', 'Api']
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause TensorFlow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return None
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def is_asan_enabled():
"""Check if ASAN is enabled."""
return pywrap_sanitizers.is_asan_enabled()
def is_msan_enabled():
"""Check if MSAN is enabled."""
return pywrap_sanitizers.is_msan_enabled()
def is_tsan_enabled():
"""Check if TSAN is enabled."""
return pywrap_sanitizers.is_tsan_enabled()
def is_ubsan_enabled():
"""Check if UBSAN is enabled."""
return pywrap_sanitizers.is_ubsan_enabled()
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or a empty string.
This method should only be used in tests written with `tf.test.TestCase`.
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_gpu(self):
... if not tf.test.is_built_with_gpu_support():
... self.skipTest("test is only applicable on GPU")
...
... with tf.device(tf.test.gpu_device_name()):
... self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
"""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
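# Hedged usage sketch (not part of the original module): demonstrates the
# name -> op-type mapping described above on a trivially constructed graph.
def _example_assert_ops_in_graph():  # pragma: no cover - illustrative only
  g = ops.Graph()
  with g.as_default():
    array_ops.placeholder(dtypes.float32, name="x")
  # Returns {"x": <NodeDef>} if the op is present with the expected type.
  return assert_ops_in_graph({"x": "Placeholder"}, g)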
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
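# Hedged usage sketch (not part of the original module): the helpers above are
# normally fed two independently constructed GraphDefs; comparing a graph's
# GraphDef with itself trivially passes, which is all this sketch shows.
def _example_assert_equal_graph_def():  # pragma: no cover - illustrative only
  g = ops.Graph()
  with g.as_default():
    math_ops.add(1.0, 2.0, name="sum")
  assert_equal_graph_def(g.as_graph_def(), g.as_graph_def())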
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),
attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),
node.attr["shared_name"].s):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return (_pywrap_util_port.IsMklEnabled() or
os.getenv("TF_ENABLE_ONEDNN_OPTS", "False").lower() in ["true", "1"])
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
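# Hedged illustration (not part of the original module) of the shape-array
# branch of the two layout helpers above: an NHWC shape list is permuted to
# NCHW and back. The concrete dimensions are arbitrary placeholders.
def _example_layout_conversion():  # pragma: no cover - illustrative only
  nchw_shape = NHWCToNCHW([32, 224, 224, 3])  # -> [32, 3, 224, 224]
  nhwc_shape = NCHWToNHWC(nchw_shape)         # -> [32, 224, 224, 3]
  return nchw_shape, nhwc_shape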
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
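# Hedged usage sketch (not part of the original module): skip_if accepts either
# a boolean expression or a zero-argument callable evaluated when the test runs.
def _example_skip_if():  # pragma: no cover - illustrative only
  @skip_if(lambda: not is_gpu_available())
  def gpu_only_check(self):
    self.assertTrue(is_gpu_available())
  return gpu_only_check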
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
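# Hedged usage sketch (not part of the original module): inside a test method,
# wrap a potentially flaky call so that a matching error skips the test rather
# than failing it. The error type and messages below are illustrative.
def _example_skip_if_error(test_obj):  # pragma: no cover - illustrative only
  with skip_if_error(test_obj, errors.UnavailableError,
                     messages=["socket closed", "connection reset"]):
    pass  # place the RPC / collective op that may raise UnavailableError here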
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
  Useful for checking that there are no missing Py_DECREFs in the C code
  exercised by a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measuring.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
# Make sure any registered functions are cleaned up in the C++ runtime.
registered_function_names = context.context().list_function_names()
# unittest.doCleanups adds to self._outcome with each unwound call.
# These objects are retained across gc collections so we exclude them
# from the object count calculation.
obj_count_by_type = _get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped))
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = (
_get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped)) -
obj_count_by_type)
# There should be no newly registered functions hanging around.
leftover_functions = (
context.context().list_function_names() - registered_function_names)
assert not leftover_functions, (
"The following functions were newly created: %s" %
leftover_functions)
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
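# Hedged usage sketch (not part of the original module): typical application to
# a TestCase method that exercises a C-extension code path and must not leak
# Python objects across repeated eager runs.
def _example_no_new_pyobjects():  # pragma: no cover - illustrative only
  @assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)
  def check_tensor_creation(self):
    math_ops.add(1.0, 2.0)  # transient result; nothing should persist
  return check_tensor_creation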
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
for i, obj in enumerate(gc.garbage[previous_garbage:]):
# Known false positive for ast.fix_missing_locations.
if getattr(obj, "__module__", "") == "ast":
new_garbage -= 3
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
  parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
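# Hedged usage sketch (not part of the original module): the generated
# dictionaries plug directly into absl's named_parameters, yielding one test
# method per combination. The option names and values are placeholders.
class _ExampleCombinationsUsage(parameterized.TestCase):  # pragma: no cover
  @parameterized.named_parameters(
      *generate_combinations_with_testcase_name(
          dtype=["float32", "int32"], use_gpu=[True, False]))
  def testCombination(self, dtype, use_gpu):
    self.assertIn(dtype, ("float32", "int32"))
    self.assertIn(use_gpu, (True, False))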
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def enable_eager_op_as_function(fn):
"""Decorator for enabling eager_op_as_function on a test.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will enable run_eager_op_as_function,
reset the context, execute the test, then reset the context to the state
it was in prior to this test.
Example:
class MyTest(test.TestCase):
@enable_eager_op_as_function
def testFoo(self):
...
Args:
fn: the function to be wrapped.
Returns:
The wrapped function.
"""
def wrapper(*args, **kwargs):
# If `run_eager_op_as_function` is already enabled do nothing.
if context.run_eager_op_as_function_enabled():
return fn(*args, **kwargs)
context.enable_run_eager_op_as_function()
try:
return fn(*args, **kwargs)
finally:
context.disable_run_eager_op_as_function()
return wrapper
def with_eager_op_as_function(cls=None, only_as_function=False):
"""Adds methods that call original methods with eager_op_as_function enabled.
Example:
@test_util.with_eager_op_as_function
class SessionTest(test.TestCase):
def testEnabledForEagerOpAsFunction(self):
...
@disable_eager_op_as_function("b/xyzabc")
def testDisabledForEagerOpAsFunction(self):
...
Generated class:
class SessionTest(test.TestCase):
def testEnabledForEagerOpAsFunction(self):
...
def testEnabledForEagerOpAsFunctionWithEagerOpAsFunctionEnabled(self):
// Enable run_eager_op_as_function
// Reset context
testEnabledForEagerOpAsFunction(self)
// Disable run_eager_op_as_function
// Reset context
def testDisabledForEagerOpAsFunction(self):
...
Args:
cls: class to decorate.
only_as_function: whether to run all the tests in the TestCase in eager mode
and in eager_op_as_function mode. By default it will run all tests in both
modes. When `only_as_function=True` tests will not be run in eager mode.
Returns:
cls with new test methods added.
"""
def decorator(cls):
if context.run_eager_op_as_function_enabled():
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
(name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("benchmark")) and
not getattr(value, "_disable_eager_op_as_function", False)):
setattr(cls, name + "WithEagerOpAsFunctionEnabled",
enable_eager_op_as_function(value))
if only_as_function:
delattr(cls, name)
return cls
if cls is not None:
return decorator(cls)
return decorator
def disable_eager_op_as_function(unused_msg):
"""Decorator for a function in a with_eager_op_as_function enabled test class.
Blocks the function from being run with eager_op_as_function enabled.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_eager_op_as_function attr set to True.
"""
def wrapper(func):
func._disable_eager_op_as_function = True
return func
return wrapper
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
        raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_or_tpu(func=None):
"""Execute the decorated test only if a physical GPU or TPU is available.
This function is intended to be applied to tests that require the presence
of a physical GPU or TPU. It complies with the following rules:
- If a GPU is available, the test will run on the GPU.
- If a GPU is absent and a TPU is available, the test will run on the TPU.
- If both GPU and TPU are absent, the test will be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_or_tpu` only supports test methods.")
def decorated(self, *args, **kwargs):
if config.list_physical_devices("GPU"):
return f(self, "GPU", *args, **kwargs)
if config.list_physical_devices("TPU"):
return f(self, "TPU", *args, **kwargs)
self.skipTest("Test requires GPU or TPU")
return decorated
return decorator if func is None else decorator(func)
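# A minimal usage sketch (hypothetical test, for illustration only). The
# decorated method receives the selected device type ("GPU" or "TPU") as an
# extra positional argument:
#
#   class MyAcceleratorTest(TensorFlowTestCase):
#
#     @run_gpu_or_tpu
#     def test_on_accelerator(self, device_type):
#       with ops.device("/device:%s:0" % device_type):
#         pass  # build and evaluate the op under test here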
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
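# A minimal usage sketch (hypothetical test, for illustration only). The test
# body runs once per horizon, including once with no horizon when None is in
# the list:
#
#   class MyCompatTest(TensorFlowTestCase):
#
#     @with_forward_compatibility_horizons(None, (2099, 1, 1))
#     def test_new_op_lowering(self):
#       pass  # assertions that must hold with and without the horizon set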
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate whether
TensorFlow was built with CUDA support.
For example,
>>> gpu_available = tf.test.is_gpu_available()
>>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
>>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading (the routine will
return True when a GPU device is available irrespective of whether TF was
built with CUDA support or ROCm support). However, no changes are made here
because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
@contextlib.contextmanager
def deterministic_ops():
"""Enables deterministic ops."""
try:
config.enable_op_determinism()
yield
finally:
config.disable_op_determinism()
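# A minimal usage sketch (hypothetical test and tensor `x`, for illustration
# only). Because `deterministic_ops` is a context manager, op determinism is
# disabled again even if the body raises:
#
#   class MyDeterminismTest(TensorFlowTestCase):
#
#     def test_reduction_is_reproducible(self):
#       with deterministic_ops():
#         first = self.evaluate(math_ops.reduce_sum(x))
#         second = self.evaluate(math_ops.reduce_sum(x))
#         self.assertAllEqual(first, second)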
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able to
call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
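# A minimal usage sketch (hypothetical test, for illustration only). The
# decorator temporarily overrides TF_CUDNN_USE_AUTOTUNE and XLA_FLAGS for the
# duration of the test method and restores them afterwards:
#
#   class MyConvTest(TensorFlowTestCase):
#
#     @disable_cudnn_autotune
#     def test_conv_graph_is_stable(self):
#       pass  # assertions comparing two builds of the same convolution graph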
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return tf_decorator.make_decorator(func, decorated)
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_asan(description): # pylint: disable=unused-argument
"""Execute the test method only if ASAN is not enabled."""
execute_func = not is_asan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_msan(description): # pylint: disable=unused-argument
"""Execute the test method only if MSAN is not enabled."""
execute_func = not is_msan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tsan(description): # pylint: disable=unused-argument
"""Execute the test method only if TSAN is not enabled."""
execute_func = not is_tsan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_ubsan(description): # pylint: disable=unused-argument
"""Execute the test method only if UBSAN is not enabled."""
execute_func = not is_ubsan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if tfrt_utils.enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if tfrt_utils.enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
The given decorator is expected to take some arguments and return a method
that is then called on the test method to produce a decorated method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
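# A minimal usage sketch (hypothetical test class, for illustration only).
# Every method whose name starts with "test" (except "test_session") gets
# wrapped with the given method-level decorator:
#
#   @for_all_test_methods(run_without_tensor_float_32,
#                         "assertAllClose tolerances are too tight for TF32")
#   class MyNumericsTest(TensorFlowTestCase):
#
#     def test_matmul_close(self):
#       pass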
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
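# A minimal usage sketch (hypothetical test, for illustration only):
#
#   class MyMatmulTest(TensorFlowTestCase):
#
#     @run_without_tensor_float_32("assertAllClose uses default tolerances")
#     def test_float32_matmul_precision(self):
#       pass  # float32 matmul assertions that TensorFloat-32 would loosen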
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying some other op or function works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
be used with complex64.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
*args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == "complex64":
a = math_ops.cast(a, "complex128")
b = math_ops.cast(b, "complex128")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
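# A minimal usage sketch (hypothetical op and tensor names, for illustration
# only), mirroring the tf.linalg.sqrtm example from the docstring above: the
# op under test produces `sqrt_x`, and the reference check multiplies it by
# itself without letting TensorFloat-32 loosen the comparison.
#
#   sqrt_x = op_under_test(x)              # `op_under_test` and `x` assumed
#   reconstruction = matmul_without_tf32(sqrt_x, sqrt_x)
#   # self.assertAllClose(reconstruction, x)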
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
# Make sure we get unfiltered stack traces during the test
traceback_utils.disable_traceback_filtering()
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
# Check if the mlir bridge has been explicitly enabled or disabled. If
# is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
# This flag provides the ability to control whether the graph mode gets
# initialized for TF1 or not. Initializing for TF1, which is what was
# happening earlier, was preventing enablement of 'eager mode' in the test.
self._set_default_seed = True
def setUp(self):
super(TensorFlowTestCase, self).setUp()
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
if self._set_default_seed:
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset the summary writer in case another test used set_as_default() with
# its summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests run at different times cannot pollute
each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
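# A minimal usage sketch (inside a hypothetical test method, for illustration
# only). `evaluate` works in both eager and graph mode, so the same assertion
# can be shared between the two:
#
#   result = self.evaluate(math_ops.add(1, 2))
#   self.assertEqual(result, 3)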
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session():
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently from self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the tearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session() as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True from just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be joined after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
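# A minimal usage sketch (inside a hypothetical test method, for illustration
# only; `worker` and `results` are assumed). Exceptions raised in the target
# surface as test failures when the thread is joined, and tearDown verifies
# that every checked thread was joined:
#
#   t = self.checkedThread(target=worker, args=(results,))
#   t.start()
#   t.join()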
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tf_type(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def evaluate_if_both_tensors(self, a, b):
if (tensor_util.is_tf_type(a) and tensor_util.is_tf_type(b) and
not isinstance(a, ops._EagerTensorBase) and
not isinstance(b, ops._EagerTensorBase)):
return self.evaluate((a, b))
else:
return (a, b)
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
# np.allclose does not always work for our custom bfloat16 extension type
# when type promotions are involved, so we first cast any bfloat16 arrays
# to float32.
a_dtype = a.dtype
a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, then
# traverse through the sequence, which is more expensive.
try:
(a, b) = self.evaluate_if_both_tensors(a, b)
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError, NotImplementedError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
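# A minimal usage sketch (hypothetical values, for illustration only). Nested
# structures are compared element-wise, so dicts, tuples and lists of tensors
# or ndarrays can be checked in a single call:
#
#   self.assertAllClose({"loss": 0.1234, "grads": [1.0, 2.0]},
#                       {"loss": loss_t, "grads": grad_ts},  # tensors assumed
#                       rtol=1e-5, atol=1e-5)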
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if
the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
# `_GetNdArray` returns an ndarray, but guard against list inputs so that
# `arrays` is always defined.
arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
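# A minimal usage sketch (hypothetical op and input, for illustration only).
# The regular expression is matched against the error message, including the
# op stack for OpErrors:
#
#   with self.assertRaisesOpError("negative input not supported"):
#     self.evaluate(my_op(invalid_input))   # `my_op`, `invalid_input` assumed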
def assertRaisesIncompatibleShapesError(
self, exception_type=errors.InvalidArgumentError):
return self.assertRaisesWithPredicateMatch(
exception_type, r"Incompatible shapes|Dimensions must be equal|"
r"required broadcastable shapes")
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
@py_func_if_in_function
def assertDictEqual(self, a, b, msg=None):
"""Assert that two given dictionary of tensors are the same.
Args:
a: Expected dictionary with numpy ndarray or anything else that can be
converted to one as values.
b: Actual dictionary with numpy ndarray or anything else that can be
converted to one as values.
msg: Optional message to report on failure.
"""
# To keep backwards compatibility, we first try the base class
# assertDictEqual. If that fails we try the tensorflow one.
try:
super().assertDictEqual(a, b, msg)
except Exception: # pylint: disable=broad-except
self.assertSameElements(a.keys(), b.keys()) # pylint: disable=g-assert-in-except
for k, v in a.items():
(a_k, b_k) = self.evaluate_if_both_tensors(v, b[k])
a_k = self._GetNdArray(a_k)
b_k = self._GetNdArray(b_k)
if np.issubdtype(a_k.dtype, np.floating):
self.assertAllClose(v, b[k], msg=k)
else:
self.assertAllEqual(v, b[k], msg=k)
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
      elif not allow_soft_placement and config.allow_soft_placement:
        config_copy = context.context().config
        config_copy.CopyFrom(config)
        config = config_copy
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
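# Illustrative sketch (not part of the original file): typical lookup of a
# node in a serialized graph. `graph` is a hypothetical tf.Graph built by the
# caller and "MatMul" a hypothetical node name.
def _example_get_node_def(graph):
  node = get_node_def_from_graph("MatMul", graph.as_graph_def())
  return None if node is None else node.op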
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
This is useful to test tf.gradients() in tests that uses tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
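# Illustrative sketch (not part of the original file): a parameterized test
# can exercise both gradient implementations with one body. `use_tape=True`
# goes through tf.GradientTape, `use_tape=False` through tf.gradients (the
# latter only works in V1 graph mode). The constant value is hypothetical
# test data.
def _example_abstract_tape_usage(use_tape):
  x = ops.convert_to_tensor(3.0)
  with AbstractGradientTape(use_tape=use_tape) as tape:
    tape.watch(x)
    y = x * x
  return tape.gradient(y, x)  # ~6.0 under either implementation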
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
*WILL NOT* make the tf.function to run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
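# Illustrative sketch (not part of the original file): tests typically wrap a
# region in run_functions_eagerly(True) so tf.function bodies execute as plain
# Python, relying on the finally block above to restore the previous setting.
# `fn` stands for any tf.function-decorated callable.
def _example_run_functions_eagerly(fn, *args):
  with run_functions_eagerly(True):
    return fn(*args)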
core.py
# coding:utf-8
#
# PROGRAM/MODULE: firebird-driver
# FILE: firebird/driver/core.py
# DESCRIPTION: Main driver code (connection, transaction, cursor etc.)
# CREATED: 25.3.2020
#
# The contents of this file are subject to the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Copyright (c) 2020 Firebird Project (www.firebirdsql.org)
# All Rights Reserved.
#
# Contributor(s): Pavel Císař (original code)
# ______________________________________
"""firebird-driver - Main driver code (connection, transaction, cursor etc.)
"""
from __future__ import annotations
from typing import Any, Type, Union, Dict, Set, List, Tuple, Sequence, Mapping, Optional, \
BinaryIO, Callable
import sys
import os
import weakref
import itertools
import threading
import io
import contextlib
import struct
from abc import ABC, abstractmethod
from warnings import warn
from queue import PriorityQueue
from ctypes import memset, memmove, create_string_buffer, byref, string_at, addressof, pointer
from firebird.base.types import Sentinel, UNLIMITED, ByteOrder
from firebird.base.logging import LoggingIdMixin, UNDEFINED
from firebird.base.buffer import MemoryBuffer, BufferFactory, BytesBufferFactory, \
CTypesBufferFactory, safe_ord
from . import fbapi as a
from .types import *
from .interfaces import iAttachment, iTransaction, iStatement, iMessageMetadata, iBlob, \
iResultSet, iDtc, iService, iCryptKeyCallbackImpl
from .hooks import APIHook, ConnectionHook, ServerHook, register_class, get_callbacks, add_hook
from .config import driver_config
SHRT_MIN = -32768
SHRT_MAX = 32767
USHRT_MAX = 65535
INT_MIN = -2147483648
INT_MAX = 2147483647
UINT_MAX = 4294967295
LONG_MIN = -9223372036854775808
LONG_MAX = 9223372036854775807
MAX_BLOB_SEGMENT_SIZE = 65535
FS_ENCODING = sys.getfilesystemencoding()
#: Python dictionary that maps Firebird character set names (key) to Python character sets (value).
CHARSET_MAP = {None: a.getpreferredencoding(), 'NONE': a.getpreferredencoding(),
'OCTETS': None, 'UNICODE_FSS': 'utf_8', 'UTF8': 'utf_8', 'UTF-8': 'utf_8',
'ASCII': 'ascii', 'SJIS_0208': 'shift_jis', 'EUCJ_0208': 'euc_jp',
'DOS737': 'cp737', 'DOS437': 'cp437', 'DOS850': 'cp850',
'DOS865': 'cp865', 'DOS860': 'cp860', 'DOS863': 'cp863',
'DOS775': 'cp775', 'DOS862': 'cp862', 'DOS864': 'cp864',
'ISO8859_1': 'iso8859_1', 'ISO8859_2': 'iso8859_2',
'ISO8859_3': 'iso8859_3', 'ISO8859_4': 'iso8859_4',
'ISO8859_5': 'iso8859_5', 'ISO8859_6': 'iso8859_6',
'ISO8859_7': 'iso8859_7', 'ISO8859_8': 'iso8859_8',
'ISO8859_9': 'iso8859_9', 'ISO8859_13': 'iso8859_13',
'KSC_5601': 'euc_kr', 'DOS852': 'cp852', 'DOS857': 'cp857',
'DOS858': 'cp858', 'DOS861': 'cp861', 'DOS866': 'cp866',
'DOS869': 'cp869', 'WIN1250': 'cp1250', 'WIN1251': 'cp1251',
'WIN1252': 'cp1252', 'WIN1253': 'cp1253', 'WIN1254': 'cp1254',
'BIG_5': 'big5', 'GB_2312': 'gb2312', 'WIN1255': 'cp1255',
'WIN1256': 'cp1256', 'WIN1257': 'cp1257', 'GB18030': 'gb18030',
'GBK': 'gbk', 'KOI8R': 'koi8_r', 'KOI8U': 'koi8_u',
'WIN1258': 'cp1258',
}
# Internal
_master = None
_util = None
_thns = threading.local()
_tenTo = [10 ** x for x in range(20)]
_i2name = {DbInfoCode.READ_SEQ_COUNT: 'sequential', DbInfoCode.READ_IDX_COUNT: 'indexed',
DbInfoCode.INSERT_COUNT: 'inserts', DbInfoCode.UPDATE_COUNT: 'updates',
DbInfoCode.DELETE_COUNT: 'deletes', DbInfoCode.BACKOUT_COUNT: 'backouts',
DbInfoCode.PURGE_COUNT: 'purges', DbInfoCode.EXPUNGE_COUNT: 'expunges'}
_bpb_stream = bytes([1, BPBItem.TYPE, 1, BlobType.STREAM])
# Info structural codes
isc_info_end = 1
isc_info_truncated = 2
isc_info_error = 3
isc_info_data_not_ready = 4
def __api_loaded(api: a.FirebirdAPI) -> None:
setattr(sys.modules[__name__], '_master', api.fb_get_master_interface())
setattr(sys.modules[__name__], '_util', _master.get_util_interface())
add_hook(APIHook.LOADED, a.FirebirdAPI, __api_loaded)
def _create_blob_buffer(size: int=MAX_BLOB_SEGMENT_SIZE) -> Any:
if size < MAX_BLOB_SEGMENT_SIZE:
result = getattr(_thns, 'blob_buf', None)
if result is None:
result = create_string_buffer(MAX_BLOB_SEGMENT_SIZE)
_thns.blob_buf = result
else:
memset(result, 0, MAX_BLOB_SEGMENT_SIZE)
else:
result = create_string_buffer(size)
return result
def _encode_timestamp(v: Union[datetime.datetime, datetime.date]) -> bytes:
# Convert datetime.datetime or datetime.date to BLR format timestamp
if isinstance(v, datetime.datetime):
return _util.encode_date(v.date()).to_bytes(4, 'little') + _util.encode_time(v.time()).to_bytes(4, 'little')
elif isinstance(v, datetime.date):
        return _util.encode_date(v).to_bytes(4, 'little') + _util.encode_time(datetime.time()).to_bytes(4, 'little')
else:
raise ValueError("datetime.datetime or datetime.date expected")
def _is_fixed_point(dialect: int, datatype: SQLDataType, subtype: int,
scale: int) -> bool:
return ((datatype in [SQLDataType.SHORT, SQLDataType.LONG, SQLDataType.INT64]
and (subtype or scale)) or
((dialect < 3) and scale
and (datatype in [SQLDataType.DOUBLE, SQLDataType.D_FLOAT])))
def _get_external_data_type_name(dialect: int, datatype: SQLDataType,
subtype: int, scale: int) -> str:
if datatype == SQLDataType.TEXT:
return 'CHAR'
elif datatype == SQLDataType.VARYING:
return 'VARCHAR'
elif _is_fixed_point(dialect, datatype, subtype, scale):
if subtype == 1:
return 'NUMERIC'
elif subtype == 2:
return 'DECIMAL'
else:
return 'NUMERIC/DECIMAL'
elif datatype == SQLDataType.SHORT:
return 'SMALLINT'
elif datatype == SQLDataType.LONG:
return 'INTEGER'
elif datatype == SQLDataType.INT64:
return 'BIGINT'
elif datatype == SQLDataType.FLOAT:
return 'FLOAT'
elif datatype in [SQLDataType.DOUBLE, SQLDataType.D_FLOAT]:
return 'DOUBLE'
elif datatype == SQLDataType.TIMESTAMP:
return 'TIMESTAMP'
elif datatype == SQLDataType.DATE:
return 'DATE'
elif datatype == SQLDataType.TIME:
return 'TIME'
elif datatype == SQLDataType.BLOB:
return 'BLOB'
elif datatype == SQLDataType.BOOLEAN:
return 'BOOLEAN'
else:
return 'UNKNOWN'
def _get_internal_data_type_name(data_type: SQLDataType) -> str:
if data_type in [SQLDataType.DOUBLE, SQLDataType.D_FLOAT]:
value = SQLDataType.DOUBLE
else:
value = data_type
return value.name
def _check_integer_range(value: int, dialect: int, datatype: SQLDataType,
subtype: int, scale: int) -> None:
if datatype == SQLDataType.SHORT:
vmin = SHRT_MIN
vmax = SHRT_MAX
elif datatype == SQLDataType.LONG:
vmin = INT_MIN
vmax = INT_MAX
elif datatype == SQLDataType.INT64:
vmin = LONG_MIN
vmax = LONG_MAX
if (value < vmin) or (value > vmax):
msg = """numeric overflow: value %s
(%s scaled for %d decimal places) is of
too great a magnitude to fit into its internal storage type %s,
which has range [%s,%s].""" % (str(value),
_get_external_data_type_name(dialect, datatype,
subtype, scale),
scale,
_get_internal_data_type_name(datatype),
str(vmin), str(vmax))
raise ValueError(msg)
def _is_str_param(value: Any, datatype: SQLDataType) -> bool:
return ((isinstance(value, str) and datatype != SQLDataType.BLOB) or
datatype in [SQLDataType.TEXT, SQLDataType.VARYING])
def create_meta_descriptors(meta: iMessageMetadata) -> List[ItemMetadata]:
result = []
for i in range(meta.get_count()):
result.append(ItemMetadata(field=meta.get_field(i),
relation=meta.get_relation(i),
owner=meta.get_owner(i),
alias=meta.get_alias(i),
datatype=meta.get_type(i),
nullable=meta.is_nullable(i),
subtype=meta.get_subtype(i),
length=meta.get_length(i),
scale=meta.get_scale(i),
charset=meta.get_charset(i),
offset=meta.get_offset(i),
null_offset=meta.get_null_offset(i)
))
return result
# Context managers
@contextlib.contextmanager
def transaction(transact_object: Transactional, *, tpb: bytes=None,
bypass: bool=False) -> Transactional:
"""Context manager for `~firebird.driver.types.Transactional` objects.
Starts new transaction when context is entered. On exit calls `rollback()` when
exception was raised, or `commit()` if there was no error. Exception raised
in managed context is NOT suppressed.
Arguments:
transact_object: Managed transactional object.
tpb: Transaction parameter buffer used to start the transaction.
bypass: When both `bypass` and `transact_object.is_active()` are `True` when
context is entered, the context manager does nothing on exit.
"""
if bypass and transact_object.is_active():
yield transact_object
else:
try:
transact_object.begin(tpb)
yield transact_object
except:
transact_object.rollback()
raise
else:
transact_object.commit()
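# --- Illustrative sketch (not part of the original driver) -------------------
# Intended use of the transaction() manager: commit on a clean exit, rollback
# (and re-raise) when the managed block raises. `transact_object` stands for
# any Transactional instance and `cur` for an application cursor; both are
# hypothetical parameters.
def _example_transaction_usage(transact_object, cur):
    with transaction(transact_object):
        cur.execute("insert into t (c1) values (?)", [1])
        # leaving the block normally commits; an exception here rolls back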
@contextlib.contextmanager
def temp_database(*args, **kwargs) -> Connection:
"""Context manager for temporary databases. Creates new database when context
is entered, and drops it on exit. Exception raised in managed context is NOT suppressed.
All positional and keyword arguments are passed to `create_database`.
"""
con = create_database(*args, **kwargs)
try:
yield con
except:
con.drop_database()
raise
else:
con.drop_database()
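# Illustrative sketch (not part of the original driver): a throw-away database
# for the duration of the block, dropped on both normal and exceptional exit.
# The database path and credentials are hypothetical and are passed straight
# to create_database().
def _example_temp_database_usage():
    with temp_database('/tmp/scratch.fdb', user='SYSDBA', password='masterkey') as con:
        pass  # run schema / data checks against `con`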
_OP_DIE = object()
_OP_RECORD_AND_REREGISTER = object()
# Managers for Parameter buffers
class TPB:
"""Transaction Parameter Buffer.
"""
def __init__(self, *, access_mode: TraAccessMode = TraAccessMode.WRITE,
isolation: Isolation = Isolation.SNAPSHOT,
lock_timeout: int = -1, no_auto_undo: bool = False,
auto_commit: bool = False, ignore_limbo: bool = False,
at_snapshot_number: int=None, encoding: str='ascii'):
self.encoding: str = encoding
self.access_mode: TraAccessMode = access_mode
self.isolation: Isolation = isolation
self.lock_timeout: int = lock_timeout
self.no_auto_undo: bool = no_auto_undo
self.auto_commit: bool = auto_commit
self.ignore_limbo: bool = ignore_limbo
self._table_reservation: List[Tuple[str, TableShareMode, TableAccessMode]] = []
# Firebird 4
self.at_snapshot_number: int = at_snapshot_number
def clear(self) -> None:
"""Clear all information.
"""
self.access_mode = TraAccessMode.WRITE
self.isolation = Isolation.SNAPSHOT
self.lock_timeout = -1
self.no_auto_undo = False
self.auto_commit = False
self.ignore_limbo = False
self._table_reservation = []
# Firebird 4
self.at_snapshot_number = None
def parse_buffer(self, buffer: bytes) -> None:
"""Load information from TPB.
"""
self.clear()
with a.get_api().util.get_xpb_builder(XpbKind.TPB, buffer) as tpb:
while not tpb.is_eof():
tag = tpb.get_tag()
if tag in TraAccessMode._value2member_map_:
self.access_mode = TraAccessMode(tag)
elif tag in TraIsolation._value2member_map_:
isolation = TraIsolation(tag)
if isolation != TraIsolation.READ_COMMITTED:
self.isolation = Isolation(isolation)
elif tag in TraReadCommitted._value2member_map_:
isolation = TraReadCommitted(tag)
if isolation == TraReadCommitted.RECORD_VERSION:
self.isolation = Isolation.READ_COMMITTED_RECORD_VERSION
else:
self.isolation = Isolation.READ_COMMITTED_NO_RECORD_VERSION
elif tag in TraLockResolution._value2member_map_:
                    self.lock_timeout = -1 if TraLockResolution(tag) is TraLockResolution.WAIT else 0
elif tag == TPBItem.AUTOCOMMIT:
self.auto_commit = True
elif tag == TPBItem.NO_AUTO_UNDO:
self.no_auto_undo = True
elif tag == TPBItem.IGNORE_LIMBO:
self.ignore_limbo = True
elif tag == TPBItem.LOCK_TIMEOUT:
self.lock_timeout = tpb.get_int()
elif tag == TPBItem.AT_SNAPSHOT_NUMBER:
self.at_snapshot_number = tpb.get_bigint()
elif tag in TableAccessMode._value2member_map_:
tbl_access = TableAccessMode(tag)
tbl_name = tpb.get_string(encoding=self.encoding)
tpb.move_next()
if tpb.is_eof():
raise ValueError(f"Missing share mode value in table {tbl_name} reservation")
if (val := tpb.get_tag()) not in TableShareMode._value2member_map_:
raise ValueError(f"Missing share mode value in table {tbl_name} reservation")
tbl_share = TableShareMode(val)
self.reserve_table(tbl_name, tbl_share, tbl_access)
tpb.move_next()
def get_buffer(self) -> bytes:
"""Create TPB from stored information.
"""
with a.get_api().util.get_xpb_builder(XpbKind.TPB) as tpb:
tpb.insert_tag(self.access_mode)
isolation = (Isolation.READ_COMMITTED_RECORD_VERSION
if self.isolation == Isolation.READ_COMMITTED
else self.isolation)
if isolation in [Isolation.SNAPSHOT, Isolation.SERIALIZABLE]:
tpb.insert_tag(isolation)
elif isolation == Isolation.READ_COMMITTED_READ_CONSISTENCY:
tpb.insert_tag(TPBItem.READ_CONSISTENCY)
else:
tpb.insert_tag(TraIsolation.READ_COMMITTED)
tpb.insert_tag(TraReadCommitted.RECORD_VERSION
if isolation == Isolation.READ_COMMITTED_RECORD_VERSION
else TraReadCommitted.NO_RECORD_VERSION)
tpb.insert_tag(TraLockResolution.NO_WAIT if self.lock_timeout == 0 else TraLockResolution.WAIT)
if self.lock_timeout > 0:
tpb.insert_int(TPBItem.LOCK_TIMEOUT, self.lock_timeout)
if self.auto_commit:
tpb.insert_tag(TPBItem.AUTOCOMMIT)
if self.no_auto_undo:
tpb.insert_tag(TPBItem.NO_AUTO_UNDO)
if self.ignore_limbo:
tpb.insert_tag(TPBItem.IGNORE_LIMBO)
if self.at_snapshot_number is not None:
tpb.insert_bigint(TPBItem.AT_SNAPSHOT_NUMBER, self.at_snapshot_number)
for table in self._table_reservation:
# Access mode + table name
tpb.insert_string(table[2], table[0], encoding=self.encoding)
tpb.insert_tag(table[1]) # Share mode
result = tpb.get_buffer()
return result
def reserve_table(self, name: str, share_mode: TableShareMode, access_mode: TableAccessMode) -> None:
"""Set information about table reservation.
"""
self._table_reservation.append((name, share_mode, access_mode))
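# Illustrative sketch (not part of the original driver): composing a custom
# TPB and serializing it with get_buffer() (e.g. to start a transaction). The
# table name and the TableShareMode / TableAccessMode member names are
# assumptions taken from firebird.driver.types.
def _example_tpb_usage():
    custom_tpb = TPB(isolation=Isolation.READ_COMMITTED_RECORD_VERSION,
                     lock_timeout=10)
    custom_tpb.reserve_table('COUNTRY', TableShareMode.PROTECTED,
                             TableAccessMode.LOCK_WRITE)
    return custom_tpb.get_buffer()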
class DPB:
"""Database Parameter Buffer.
"""
def __init__(self, *, user: str=None, password: str=None, role: str=None,
trusted_auth: bool=False, sql_dialect: int=3, timeout: int=None,
charset: str='UTF8', cache_size: int=None, no_gc: bool=False,
no_db_triggers: bool=False, no_linger: bool=False,
utf8filename: bool=False, dbkey_scope: DBKeyScope=None,
dummy_packet_interval: int=None, overwrite: bool=False,
db_cache_size: int=None, forced_writes: bool=None,
reserve_space: bool=None, page_size: int=None, read_only: bool=False,
sweep_interval: int=None, db_sql_dialect: int=None, db_charset: str=None,
config: str=None, auth_plugin_list: str=None, session_time_zone: str=None,
set_db_replica: ReplicaMode=None, set_bind: str=None,
decfloat_round: DecfloatRound=None,
decfloat_traps: List[DecfloatTraps]=None
):
# Available options:
# AuthClient, WireCryptPlugin, Providers, ConnectionTimeout, WireCrypt,
# WireConpression, DummyPacketInterval, RemoteServiceName, RemoteServicePort,
# RemoteAuxPort, TcpNoNagle, IpcName, RemotePipeName, ClientBatchBuffer [FB4+]
#: Configuration override
self.config: Optional[str] = config
#: List of authentication plugins override
self.auth_plugin_list: str = auth_plugin_list
# Connect
#: Use trusted authentication
self.trusted_auth: bool = trusted_auth
#: User name
self.user: str = user
#: User password
self.password: str = password
#: User role
self.role: str = role
#: SQL Dialect for database connection
self.sql_dialect: int = sql_dialect
#: Character set for database connection
self.charset: str = charset
#: Connection timeout
self.timeout: Optional[int] = timeout
#: Dummy packet interval for this database connection
self.dummy_packet_interval: Optional[int] = dummy_packet_interval
#: Page cache size override for database connection
self.cache_size: int = cache_size
#: Disable garbage collection for database connection
self.no_gc: bool = no_gc
#: Disable database triggers for database connection
self.no_db_triggers: bool = no_db_triggers
#: Do not use linger for database connection
self.no_linger: bool = no_linger
#: Database filename passed in UTF8
self.utf8filename: bool = utf8filename
#: Scope for RDB$DB_KEY values
self.dbkey_scope: Optional[DBKeyScope] = dbkey_scope
#: Session time zone [Firebird 4]
self.session_time_zone: Optional[str] = session_time_zone
#: Set replica mode [Firebird 4]
self.set_db_replica: Optional[ReplicaMode] = set_db_replica
#: Set BIND [Firebird 4]
self.set_bind: Optional[str] = set_bind
#: Set DECFLOAT ROUND [Firebird 4]
self.decfloat_round: Optional[DecfloatRound] = decfloat_round
#: Set DECFLOAT TRAPS [Firebird 4]
self.decfloat_traps: Optional[List[DecfloatTraps]] = \
None if decfloat_traps is None else list(decfloat_traps)
# For db create
#: Database page size [db create only]
self.page_size: Optional[int] = page_size
#: Overwrite existing database [db create only]
self.overwrite: bool = overwrite
#: Number of pages in database cache [db create only]
self.db_buffers = None
#: Database cache size [db create only]
self.db_cache_size: Optional[int] = db_cache_size
#: Database write mode (True = sync/False = async) [db create only]
self.forced_writes: Optional[bool] = forced_writes
#: Database data page space usage (True = reserve space, False = Use all space) [db create only]
self.reserve_space: Optional[bool] = reserve_space
#: Database access mode (True = read-only/False = read-write) [db create only]
self.read_only: bool = read_only
#: Sweep interval for the database [db create only]
self.sweep_interval: Optional[int] = sweep_interval
#: SQL dialect for the database [db create only]
self.db_sql_dialect: Optional[int] = db_sql_dialect
#: Character set for the database [db create only]
self.db_charset: Optional[str] = db_charset
def clear(self) -> None:
"""Clear all information.
"""
self.config = None
# Connect
self.trusted_auth = False
self.user = None
self.password = None
self.role = None
self.sql_dialect = 3
self.charset = 'UTF8'
self.timeout = None
self.dummy_packet_interval = None
self.cache_size = None
self.no_gc = False
self.no_db_triggers = False
self.no_linger = False
self.utf8filename = False
self.dbkey_scope = None
self.session_time_zone = None
self.set_db_replica = None
self.set_bind = None
self.decfloat_round = None
self.decfloat_traps = None
# For db create
self.page_size = None
self.overwrite = False
self.db_buffers = None
self.forced_writes = None
self.reserve_space = None
self.page_size = None
self.read_only = False
self.sweep_interval = None
self.db_sql_dialect = None
self.db_charset = None
def parse_buffer(self, buffer: bytes) -> None:
"""Load information from DPB.
"""
_py_charset: str = CHARSET_MAP.get(self.charset, 'ascii')
self.clear()
with a.get_api().util.get_xpb_builder(XpbKind.DPB, buffer) as dpb:
while not dpb.is_eof():
tag = dpb.get_tag()
if tag == DPBItem.CONFIG:
self.config = dpb.get_string(encoding=_py_charset)
elif tag == DPBItem.AUTH_PLUGIN_LIST:
self.auth_plugin_list = dpb.get_string()
elif tag == DPBItem.TRUSTED_AUTH:
self.trusted_auth = True
elif tag == DPBItem.USER_NAME:
self.user = dpb.get_string(encoding=_py_charset)
elif tag == DPBItem.PASSWORD:
self.password = dpb.get_string(encoding=_py_charset)
elif tag == DPBItem.CONNECT_TIMEOUT:
self.timeout = dpb.get_int()
elif tag == DPBItem.DUMMY_PACKET_INTERVAL:
self.dummy_packet_interval = dpb.get_int()
elif tag == DPBItem.SQL_ROLE_NAME:
self.role = dpb.get_string(encoding=_py_charset)
elif tag == DPBItem.SQL_DIALECT:
self.sql_dialect = dpb.get_int()
elif tag == DPBItem.LC_CTYPE:
self.charset = dpb.get_string()
elif tag == DPBItem.NUM_BUFFERS:
self.cache_size = dpb.get_int()
elif tag == DPBItem.NO_GARBAGE_COLLECT:
self.no_gc = bool(dpb.get_int())
elif tag == DPBItem.UTF8_FILENAME:
self.utf8filename = bool(dpb.get_int())
elif tag == DPBItem.NO_DB_TRIGGERS:
self.no_db_triggers = bool(dpb.get_int())
elif tag == DPBItem.NOLINGER:
self.no_linger = bool(dpb.get_int())
elif tag == DPBItem.DBKEY_SCOPE:
self.dbkey_scope = DBKeyScope(dpb.get_int())
elif tag == DPBItem.PAGE_SIZE:
self.page_size = dpb.get_int()
elif tag == DPBItem.OVERWRITE:
self.overwrite = bool(dpb.get_int())
elif tag == DPBItem.SET_PAGE_BUFFERS:
self.db_cache_size = dpb.get_int()
elif tag == DPBItem.FORCE_WRITE:
self.forced_writes = bool(dpb.get_int())
elif tag == DPBItem.NO_RESERVE:
self.reserve_space = not bool(dpb.get_int())
elif tag == DPBItem.SET_DB_READONLY:
self.read_only = bool(dpb.get_int())
elif tag == DPBItem.SWEEP_INTERVAL:
self.sweep_interval = dpb.get_int()
elif tag == DPBItem.SET_DB_SQL_DIALECT:
self.db_sql_dialect = dpb.get_int()
elif tag == DPBItem.SET_DB_CHARSET:
self.db_charset = dpb.get_string()
elif tag == DPBItem.SESSION_TIME_ZONE:
self.session_time_zone = dpb.get_string()
elif tag == DPBItem.SET_DB_REPLICA:
self.set_db_replica = ReplicaMode(dpb.get_int())
elif tag == DPBItem.SET_BIND:
self.set_bind = dpb.get_string()
elif tag == DPBItem.DECFLOAT_ROUND:
self.decfloat_round = DecfloatRound(dpb.get_string())
elif tag == DPBItem.DECFLOAT_TRAPS:
self.decfloat_traps = [DecfloatTraps(v.strip())
for v in dpb.get_string().split(',')]
def get_buffer(self, *, for_create: bool = False) -> bytes:
"""Create DPB from stored information.
"""
_py_charset: str = CHARSET_MAP.get(self.charset, 'ascii')
with a.get_api().util.get_xpb_builder(XpbKind.DPB) as dpb:
if self.config is not None:
dpb.insert_string(DPBItem.CONFIG, self.config, encoding=_py_charset)
if self.trusted_auth:
dpb.insert_tag(DPBItem.TRUSTED_AUTH)
else:
if self.user:
dpb.insert_string(DPBItem.USER_NAME, self.user, encoding=_py_charset)
if self.password:
dpb.insert_string(DPBItem.PASSWORD, self.password, encoding=_py_charset)
if self.auth_plugin_list is not None:
dpb.insert_string(DPBItem.AUTH_PLUGIN_LIST, self.auth_plugin_list)
if self.timeout is not None:
dpb.insert_int(DPBItem.CONNECT_TIMEOUT, self.timeout)
if self.dummy_packet_interval is not None:
dpb.insert_int(DPBItem.DUMMY_PACKET_INTERVAL, self.dummy_packet_interval)
if self.role:
dpb.insert_string(DPBItem.SQL_ROLE_NAME, self.role, encoding=_py_charset)
if self.sql_dialect:
dpb.insert_int(DPBItem.SQL_DIALECT, self.sql_dialect)
if self.charset:
dpb.insert_string(DPBItem.LC_CTYPE, self.charset)
if for_create:
dpb.insert_string(DPBItem.SET_DB_CHARSET, self.charset)
if self.cache_size is not None:
dpb.insert_int(DPBItem.NUM_BUFFERS, self.cache_size)
if self.no_gc:
dpb.insert_int(DPBItem.NO_GARBAGE_COLLECT, 1)
if self.utf8filename:
dpb.insert_int(DPBItem.UTF8_FILENAME, 1)
if self.no_db_triggers:
dpb.insert_int(DPBItem.NO_DB_TRIGGERS, 1)
if self.no_linger:
dpb.insert_int(DPBItem.NOLINGER, 1)
if self.dbkey_scope is not None:
dpb.insert_int(DPBItem.DBKEY_SCOPE, self.dbkey_scope)
if self.session_time_zone is not None:
dpb.insert_string(DPBItem.SESSION_TIME_ZONE, self.session_time_zone)
if self.set_db_replica is not None:
dpb.insert_int(DPBItem.SET_DB_REPLICA, self.set_db_replica)
if self.set_bind is not None:
dpb.insert_string(DPBItem.SET_BIND, self.set_bind)
if self.decfloat_round is not None:
dpb.insert_string(DPBItem.DECFLOAT_ROUND, self.decfloat_round.value)
if self.decfloat_traps is not None:
dpb.insert_string(DPBItem.DECFLOAT_TRAPS, ','.join(e.value for e in
self.decfloat_traps))
if for_create:
if self.page_size is not None:
dpb.insert_int(DPBItem.PAGE_SIZE, self.page_size)
if self.overwrite:
dpb.insert_int(DPBItem.OVERWRITE, 1)
if self.db_cache_size is not None:
dpb.insert_int(DPBItem.SET_PAGE_BUFFERS, self.db_cache_size)
if self.forced_writes is not None:
dpb.insert_int(DPBItem.FORCE_WRITE, int(self.forced_writes))
if self.reserve_space is not None:
dpb.insert_int(DPBItem.NO_RESERVE, int(not self.reserve_space))
if self.read_only:
dpb.insert_int(DPBItem.SET_DB_READONLY, 1)
if self.sweep_interval is not None:
dpb.insert_int(DPBItem.SWEEP_INTERVAL, self.sweep_interval)
if self.db_sql_dialect is not None:
dpb.insert_int(DPBItem.SET_DB_SQL_DIALECT, self.db_sql_dialect)
if self.db_charset is not None:
dpb.insert_string(DPBItem.SET_DB_CHARSET, self.db_charset)
#
result = dpb.get_buffer()
return result
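# Illustrative sketch (not part of the original driver): one DPB instance can
# serve both attachment and database creation; get_buffer(for_create=True)
# additionally emits the [db create only] items set above. The credentials
# are hypothetical.
def _example_dpb_usage():
    dpb = DPB(user='SYSDBA', password='masterkey', charset='UTF8',
              page_size=8192, overwrite=True)
    return dpb.get_buffer(), dpb.get_buffer(for_create=True)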
class SPB_ATTACH:
"""Service Parameter Buffer.
"""
def __init__(self, *, user: str = None, password: str = None, trusted_auth: bool = False,
config: str = None, auth_plugin_list: str = None, expected_db: str=None,
encoding: str='ascii', errors: str='strict', role: str=None):
self.encoding: str = encoding
self.errors: str = errors
self.user: str = user
self.password: str = password
self.trusted_auth: bool = trusted_auth
self.config: str = config
self.auth_plugin_list: str = auth_plugin_list
self.expected_db: str = expected_db
self.role: str = role
def clear(self) -> None:
"""Clear all information.
"""
self.user = None
self.password = None
self.trusted_auth = False
self.config = None
self.expected_db = None
def parse_buffer(self, buffer: bytes) -> None:
"""Load information from SPB_ATTACH.
"""
self.clear()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_ATTACH, buffer) as spb:
while not spb.is_eof():
tag = spb.get_tag()
if tag == SPBItem.CONFIG:
self.config = spb.get_string(encoding=self.encoding, errors=self.errors)
elif tag == SPBItem.AUTH_PLUGIN_LIST:
self.auth_plugin_list = spb.get_string()
elif tag == SPBItem.TRUSTED_AUTH:
self.trusted_auth = True
elif tag == SPBItem.USER_NAME:
self.user = spb.get_string(encoding=self.encoding, errors=self.errors)
elif tag == SPBItem.PASSWORD:
self.password = spb.get_string(encoding=self.encoding, errors=self.errors)
elif tag == SPBItem.SQL_ROLE_NAME:
self.role = spb.get_string(encoding=self.encoding, errors=self.errors)
elif tag == SPBItem.EXPECTED_DB:
self.expected_db = spb.get_string(encoding=self.encoding, errors=self.errors)
def get_buffer(self) -> bytes:
"""Create SPB_ATTACH from stored information.
"""
with a.get_api().util.get_xpb_builder(XpbKind.SPB_ATTACH) as spb:
if self.config is not None:
spb.insert_string(SPBItem.CONFIG, self.config, encoding=self.encoding,
errors=self.errors)
if self.trusted_auth:
spb.insert_tag(SPBItem.TRUSTED_AUTH)
else:
if self.user is not None:
spb.insert_string(SPBItem.USER_NAME, self.user, encoding=self.encoding,
errors=self.errors)
if self.password is not None:
spb.insert_string(SPBItem.PASSWORD, self.password,
encoding=self.encoding, errors=self.errors)
if self.role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, self.role, encoding=self.encoding,
errors=self.errors)
if self.auth_plugin_list is not None:
spb.insert_string(SPBItem.AUTH_PLUGIN_LIST, self.auth_plugin_list)
if self.expected_db is not None:
spb.insert_string(SPBItem.EXPECTED_DB, self.expected_db,
encoding=self.encoding, errors=self.errors)
result = spb.get_buffer()
return result
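# Illustrative sketch (not part of the original driver): the service attach
# buffer follows the same fill-then-serialize pattern as DPB/TPB. The
# credentials are hypothetical.
def _example_spb_attach_usage():
    spb = SPB_ATTACH(user='SYSDBA', password='masterkey', encoding='utf-8')
    return spb.get_buffer()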
class Buffer(MemoryBuffer):
"""MemoryBuffer with extensions.
"""
def __init__(self, init: Union[int, bytes], size: int = None, *,
factory: Type[BufferFactory]=BytesBufferFactory,
max_size: Union[int, Sentinel]=UNLIMITED, byteorder: ByteOrder=ByteOrder.LITTLE):
super().__init__(init, size, factory=factory, eof_marker=isc_info_end,
max_size=max_size, byteorder=byteorder)
def seek_last_data(self) -> int:
"""Set the position in buffer to first non-zero byte when searched from
the end of buffer.
"""
self.pos = self.last_data
def get_tag(self) -> int:
"""Read 1 byte number (c_ubyte).
"""
return self.read_byte()
def rewind(self) -> None:
"""Set current position in buffer to beginning.
"""
self.pos = 0
def is_truncated(self) -> bool:
"""Return True when positioned on `isc_info_truncated` tag.
"""
return safe_ord(self.raw[self.pos]) == isc_info_truncated
class CBuffer(Buffer):
"""ctypes MemoryBuffer with extensions.
"""
def __init__(self, init: Union[int, bytes], size: int = None, *,
max_size: Union[int, Sentinel]=UNLIMITED, byteorder: ByteOrder=ByteOrder.LITTLE):
super().__init__(init, size, factory=CTypesBufferFactory, max_size=max_size, byteorder=byteorder)
class EventBlock:
"""Used internally by `EventCollector`.
"""
def __init__(self, queue, db_handle: a.FB_API_HANDLE, event_names: List[str]):
self.__first = True
def callback(result, length, updated):
memmove(result, updated, length)
self.__queue.put((_OP_RECORD_AND_REREGISTER, self))
return 0
self.__queue: PriorityQueue = weakref.proxy(queue)
self._db_handle: a.FB_API_HANDLE = db_handle
self._isc_status: a.ISC_STATUS_ARRAY = a.ISC_STATUS_ARRAY(0)
self.event_names: List[str] = list(event_names)
self.__results: a.RESULT_VECTOR = a.RESULT_VECTOR(0)
self.__closed: bool = False
self.__callback: a.ISC_EVENT_CALLBACK = a.ISC_EVENT_CALLBACK(callback)
self.event_buf = pointer(a.ISC_UCHAR(0))
self.result_buf = pointer(a.ISC_UCHAR(0))
self.buf_length: int = 0
self.event_id: a.ISC_LONG = a.ISC_LONG(0)
self.buf_length = a.api.isc_event_block(pointer(self.event_buf),
pointer(self.result_buf),
*[x.encode() for x in event_names])
def __del__(self):
if not self.__closed:
warn(f"EventBlock disposed without prior close()", ResourceWarning)
self.close()
def __lt__(self, other):
return self.event_id.value < other.event_id.value
def __wait_for_events(self) -> None:
a.api.isc_que_events(self._isc_status, self._db_handle, self.event_id,
self.buf_length, self.event_buf,
self.__callback, self.result_buf)
if a.db_api_error(self._isc_status): # pragma: no cover
self.close()
raise a.exception_from_status(DatabaseError, self._isc_status,
"Error while waiting for events.")
def _begin(self) -> None:
self.__wait_for_events()
def count_and_reregister(self) -> Dict[str, int]:
"""Count event occurences and re-register interest in further notifications.
"""
result = {}
a.api.isc_event_counts(self.__results, self.buf_length,
self.event_buf, self.result_buf)
if self.__first:
# Ignore the first call, it's for setting up the table
self.__first = False
self.__wait_for_events()
return None
for i in range(len(self.event_names)):
result[self.event_names[i]] = int(self.__results[i])
self.__wait_for_events()
return result
def close(self) -> None:
"""Close this block canceling managed events.
"""
if not self.__closed:
a.api.isc_cancel_events(self._isc_status, self._db_handle, self.event_id)
self.__closed = True
del self.__callback
if a.db_api_error(self._isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError, self._isc_status,
"Error while canceling events.")
def is_closed(self) -> bool:
"""Returns True if event block is closed.
"""
return self.__closed
class EventCollector:
"""Collects database event notifications.
Notifications of events are not accumulated until `.begin()` method is called.
From the moment the `.begin()` is called, notifications of any events that occur
    will accumulate asynchronously within the collector’s internal queue until the collector
is closed either explicitly (via the `.close()` method) or implicitly
(via garbage collection).
Note:
`EventCollector` implements context manager protocol to call method `.begin()`
and `.close()` automatically.
Example::
with connection.event_collector(['event_a', 'event_b']) as collector:
events = collector.wait()
process_events(events)
Important:
DO NOT create instances of this class directly! Use only
`Connection.event_collector` to get EventCollector instances.
"""
def __init__(self, db_handle: a.FB_API_HANDLE, event_names: Sequence[str]):
self._db_handle: a.FB_API_HANDLE = db_handle
self._isc_status: a.ISC_STATUS_ARRAY = a.ISC_STATUS_ARRAY(0)
self.__event_names: List[str] = list(event_names)
self.__events: Dict[str, int] = dict.fromkeys(self.__event_names, 0)
self.__event_blocks: List[EventBlock] = []
self.__closed: bool = False
self.__queue: PriorityQueue = PriorityQueue()
self.__events_ready: threading.Event = threading.Event()
self.__blocks: List[List[str]] = [[x for x in y if x] for y in itertools.zip_longest(*[iter(event_names)]*15)]
self.__initialized: bool = False
def __del__(self):
if not self.__closed:
warn(f"EventCollector disposed without prior close()", ResourceWarning)
self.close()
def __enter__(self):
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def begin(self) -> None:
"""Starts listening for events.
Must be called directly or through context manager interface.
"""
def event_process(queue: PriorityQueue):
while True:
operation, data = queue.get()
if operation is _OP_RECORD_AND_REREGISTER:
events = data.count_and_reregister()
if events:
for key, value in events.items():
self.__events[key] += value
self.__events_ready.set()
elif operation is _OP_DIE:
return
self.__initialized = True
self.__process_thread = threading.Thread(target=event_process, args=(self.__queue,))
self.__process_thread.start()
for block_events in self.__blocks:
event_block = EventBlock(self.__queue, self._db_handle, block_events)
self.__event_blocks.append(event_block)
event_block._begin()
def wait(self, timeout: Union[int, float]=None) -> Dict[str, int]:
"""Wait for events.
Blocks the calling thread until at least one of the events occurs, or
the specified timeout (if any) expires.
Arguments:
timeout: Number of seconds (use a float to indicate fractions of
seconds). If not even one of the relevant events has
occurred after timeout seconds, this method will unblock
and return None. The default timeout is infinite.
Returns:
`None` if the wait timed out, otherwise a dictionary that maps
`event_name -> event_occurrence_count`.
Example::
>>> collector = connection.event_collector(['event_a', 'event_b'])
>>> collector.begin()
>>> collector.wait()
{
'event_a': 1,
'event_b': 0
}
In the example above `event_a` occurred once and `event_b` did not occur
at all.
Raises:
InterfaceError: When collector does not listen for events.
"""
if not self.__initialized:
raise InterfaceError("Event collection not initialized (begin() not called).")
if not self.__closed:
self.__events_ready.wait(timeout)
return self.__events.copy()
def flush(self) -> None:
"""Clear any event notifications that have accumulated in the collector’s
internal queue.
"""
if not self.__closed:
self.__events_ready.clear()
self.__events = dict.fromkeys(self.__event_names, 0)
def close(self) -> None:
"""Cancels the standing request for this collector to be notified of events.
After this method has been called, this EventCollector instance is useless,
and should be discarded.
"""
if not self.__closed:
self.__queue.put((_OP_DIE, self))
self.__process_thread.join()
for block in self.__event_blocks:
block.close()
self.__closed = True
def is_closed(self) -> bool:
"""Returns True if collector is closed.
"""
return self.__closed
class InfoProvider(ABC):
"""Abstract base class for embedded information providers.
Attributes:
response (CBuffer): Internal buffer for response packet acquired via Firebird API.
request (Buffer): Internal buffer for information request packet needed by Firebird API.
"""
def __init__(self, charset: str, buffer_size: int=256):
self._charset: str = charset
self.response: CBuffer = CBuffer(buffer_size)
self.request: Buffer = Buffer(10)
self._cache: Dict = {}
def _raise_not_supported(self) -> None:
raise NotSupportedError("Requested functionality is not supported by used Firebird version.")
@abstractmethod
def _close(self) -> None:
"""Close the information source.
"""
@abstractmethod
def _acquire(self, request: bytes) -> None:
"""Acquire information specified by parameter. Information must be stored in
`response` buffer.
Arguments:
request: Data specifying the required information.
"""
def _get_data(self, request: bytes, max_size: int=SHRT_MAX) -> None:
"""Helper function that aquires information specified by parameter into internal
`response` buffer. If information source couldn't store all required data because
the buffer is too small, this function tries to `.acquire()` the information again
with buffer of doubled size.
Arguments:
request: Data specifying the required information.
max_size: Maximum response size.
Raises:
            InterfaceError: If information cannot be successfully stored into buffer
                of `max_size`, or response is invalid.
"""
while True:
self._acquire(request)
if self.response.is_truncated():
if (buf_size := len(self.response.raw)) < max_size:
buf_size = min(buf_size * 2, max_size)
self.response.resize(buf_size)
continue
else: # pragma: no cover
raise InterfaceError("Response too large")
else:
break
self.response.seek_last_data()
if not self.response.is_eof(): # pragma: no cover
raise InterfaceError("Invalid response format")
self.response.rewind()
class EngineVersionProvider(InfoProvider):
"""Engine version provider for internal use by driver.
"""
def __init__(self, charset: str):
super().__init__(charset)
self.con = None
def _close(self) -> None:
pass
def _acquire(self, request: bytes) -> None:
"""Acquires information from associated attachment. Information is stored in native
format in `response` buffer.
Arguments:
request: Data specifying the required information.
"""
if isinstance(self.con(), Connection):
self.con()._att.get_info(request, self.response.raw)
else:
self.con()._svc.query(None, request, self.response.raw)
def get_server_version(self, con: Union[Connection, Server]) -> str:
self.con = con
info_code = DbInfoCode.FIREBIRD_VERSION if isinstance(con(), Connection) \
else SrvInfoCode.SERVER_VERSION
self._get_data(bytes([info_code]))
tag = self.response.get_tag()
if (tag != info_code.value):
if tag == isc_info_error: # pragma: no cover
raise InterfaceError("An error response was received")
else: # pragma: no cover
raise InterfaceError("Result code does not match request code")
if isinstance(con(), Connection):
self.response.read_byte() # Cluster length
self.response.read_short() # number of strings
verstr: str = self.response.read_pascal_string()
x = verstr.split()
if x[0].find('V') > 0:
(x, result) = x[0].split('V')
elif x[0].find('T') > 0: # pragma: no cover
(x, result) = x[0].split('T')
else: # pragma: no cover
# Unknown version
result = '0.0.0.0'
self.con = None
return result
def get_engine_version(self, con: Union[Connection, Server]) -> float:
x = self.get_server_version(con).split('.')
return float(f'{x[0]}.{x[1]}')
_engine_version_provider: EngineVersionProvider = EngineVersionProvider('utf8')
class DatabaseInfoProvider3(InfoProvider):
"""Provides access to information about attached database [Firebird 3+].
Important:
Do NOT create instances of this class directly! Use `Connection.info` property to
access the instance already bound to attached database.
"""
def __init__(self, connection: Connection):
super().__init__(connection._encoding)
self._con: Connection = weakref.ref(connection)
self._handlers: Dict[DbInfoCode, Callable] = \
{DbInfoCode.BASE_LEVEL: self.response.get_tag,
DbInfoCode.DB_ID: self.__db_id,
DbInfoCode.IMPLEMENTATION: self.__implementation,
DbInfoCode.IMPLEMENTATION_OLD: self.__implementation_old,
DbInfoCode.VERSION: self._info_string,
DbInfoCode.FIREBIRD_VERSION: self._info_string,
DbInfoCode.CRYPT_KEY: self._info_string,
DbInfoCode.USER_NAMES: self.__user_names,
DbInfoCode.ACTIVE_TRANSACTIONS: self.__tra_active,
DbInfoCode.LIMBO: self.__tra_limbo,
DbInfoCode.ALLOCATION: self.response.read_sized_int,
DbInfoCode.NO_RESERVE: self.response.read_sized_int,
DbInfoCode.DB_SQL_DIALECT: self.response.read_sized_int,
DbInfoCode.ODS_MINOR_VERSION: self.response.read_sized_int,
DbInfoCode.ODS_VERSION: self.response.read_sized_int,
DbInfoCode.PAGE_SIZE: self.response.read_sized_int,
DbInfoCode.CURRENT_MEMORY: self.response.read_sized_int,
DbInfoCode.FORCED_WRITES: self.response.read_sized_int,
DbInfoCode.MAX_MEMORY: self.response.read_sized_int,
DbInfoCode.NUM_BUFFERS: self.response.read_sized_int,
DbInfoCode.SWEEP_INTERVAL: self.response.read_sized_int,
DbInfoCode.ATTACHMENT_ID: self.response.read_sized_int,
DbInfoCode.FETCHES: self.response.read_sized_int,
DbInfoCode.MARKS: self.response.read_sized_int,
DbInfoCode.READS: self.response.read_sized_int,
DbInfoCode.WRITES: self.response.read_sized_int,
DbInfoCode.SET_PAGE_BUFFERS: self.response.read_sized_int,
DbInfoCode.DB_READ_ONLY: self.response.read_sized_int,
DbInfoCode.DB_SIZE_IN_PAGES: self.response.read_sized_int,
DbInfoCode.PAGE_ERRORS: self.response.read_sized_int,
DbInfoCode.RECORD_ERRORS: self.response.read_sized_int,
DbInfoCode.BPAGE_ERRORS: self.response.read_sized_int,
DbInfoCode.DPAGE_ERRORS: self.response.read_sized_int,
DbInfoCode.IPAGE_ERRORS: self.response.read_sized_int,
DbInfoCode.PPAGE_ERRORS: self.response.read_sized_int,
DbInfoCode.TPAGE_ERRORS: self.response.read_sized_int,
DbInfoCode.ATT_CHARSET: self.response.read_sized_int,
DbInfoCode.OLDEST_TRANSACTION: self.response.read_sized_int,
DbInfoCode.OLDEST_ACTIVE: self.response.read_sized_int,
DbInfoCode.OLDEST_SNAPSHOT: self.response.read_sized_int,
DbInfoCode.NEXT_TRANSACTION: self.response.read_sized_int,
DbInfoCode.ACTIVE_TRAN_COUNT: self.response.read_sized_int,
DbInfoCode.DB_CLASS: self.response.read_sized_int,
DbInfoCode.DB_PROVIDER: self.response.read_sized_int,
DbInfoCode.PAGES_USED: self.response.read_sized_int,
DbInfoCode.PAGES_FREE: self.response.read_sized_int,
DbInfoCode.CRYPT_KEY: self._info_string,
DbInfoCode.CRYPT_STATE: self.__crypt_state,
DbInfoCode.CONN_FLAGS: self.__con_state,
DbInfoCode.BACKOUT_COUNT: self.__tbl_perf_count,
DbInfoCode.DELETE_COUNT: self.__tbl_perf_count,
DbInfoCode.EXPUNGE_COUNT: self.__tbl_perf_count,
DbInfoCode.INSERT_COUNT: self.__tbl_perf_count,
DbInfoCode.PURGE_COUNT: self.__tbl_perf_count,
DbInfoCode.READ_IDX_COUNT: self.__tbl_perf_count,
DbInfoCode.READ_SEQ_COUNT: self.__tbl_perf_count,
DbInfoCode.UPDATE_COUNT: self.__tbl_perf_count,
DbInfoCode.CREATION_DATE: self.__creation_date,
DbInfoCode.PAGE_CONTENTS: self.response.read_bytes,
}
# Page size
self.__page_size = self.get_info(DbInfoCode.PAGE_SIZE) # prefetch it
# Get Firebird engine version
self.__version = _engine_version_provider.get_server_version(self._con)
x = self.__version.split('.')
self.__engine_version = float(f'{x[0]}.{x[1]}')
def __db_id(self) -> List:
result = []
self.response.read_short() # Cluster length
count = self.response.read_byte()
while count > 0:
result.append(self.response.read_pascal_string(encoding=self._charset))
count -= 1
return result
def __implementation(self) -> Tuple[ImpCPU, ImpOS, ImpCompiler, ImpFlags]:
self.response.read_short() # Cluster length
cpu_id = ImpCPU(self.response.read_byte())
os_id = ImpOS(self.response.read_byte())
compiler_id = ImpCompiler(self.response.read_byte())
flags = ImpFlags(self.response.read_byte())
return (cpu_id, os_id, compiler_id, flags)
def __implementation_old(self) -> Tuple[int, int]:
self.response.read_short() # Cluster length
impl_number = self.response.read_byte()
class_number = self.response.read_byte()
return (impl_number, class_number)
def _info_string(self) -> str:
self.response.read_byte() # Cluster length
self.response.read_short() # number of strings
return self.response.read_pascal_string()
    def __user_names(self) -> Dict[str, int]:
self.response.rewind() # necessary to process names separated by info tag
usernames = []
while not self.response.is_eof():
self.response.get_tag() # DbInfoCode.USER_NAMES
self.response.read_short() # cluster length
usernames.append(self.response.read_pascal_string(encoding=self._charset))
# The client-exposed return value is a dictionary mapping
# username -> number of connections by that user.
result = {}
for name in usernames:
result[name] = result.get(name, 0) + 1
return result
def __tra_active(self) -> List:
result = []
while not self.response.is_eof():
self.response.get_tag() # DbInfoCode.ACTIVE_TRANSACTIONS
tid_size = self.response.read_short()
if tid_size == 4:
tra_id = self.response.read_int()
elif tid_size == 8:
tra_id = self.response.read_bigint()
else: # pragma: no cover
raise InterfaceError(f"Wrong transaction ID size {tid_size}")
result.append(tra_id)
return result
def __tra_limbo(self) -> List:
result = []
self.response.read_short() # Total data length
while not self.response.is_eof():
self.response.get_tag()
tid_size = self.response.read_short()
if tid_size == 4:
tra_id = self.response.read_int()
elif tid_size == 8:
tra_id = self.response.read_bigint()
else: # pragma: no cover
raise InterfaceError(f"Wrong transaction ID size {tid_size}")
result.append(tra_id)
return result
def __crypt_state(self) -> EncryptionFlag:
return EncryptionFlag(self.response.read_sized_int())
def __con_state(self) -> ConnectionFlag:
return ConnectionFlag(self.response.read_sized_int())
def __tbl_perf_count(self) -> Dict[int, int]:
result = {}
clen = self.response.read_short() # Cluster length
while clen > 0:
relation_id = self.response.read_short()
result[relation_id] = self.response.read_int()
clen -= 6
return result
def __creation_date(self) -> datetime.datetime:
value = self.response.read_bytes()
return datetime.datetime.combine(_util.decode_date(value[:4]),
_util.decode_time(value[4:]))
def _close(self) -> None:
"""Drops the association with attached database.
"""
self._con = None
def _acquire(self, request: bytes) -> None:
"""Acquires information from associated attachment. Information is stored in native
format in `response` buffer.
Arguments:
request: Data specifying the required information.
"""
self._con()._att.get_info(request, self.response.raw)
def get_info(self, info_code: DbInfoCode, page_number: int=None) -> Any:
"""Returns requested information from associated attachment.
Arguments:
info_code: A code specifying the required information.
page_number: A page number for `DbInfoCode.PAGE_CONTENTS` request. Ignored for other requests.
Returns:
The data type of returned value depends on information required.
"""
if info_code in self._cache:
return self._cache[info_code]
if info_code not in self._handlers:
raise NotSupportedError(f"Info code {info_code} not supported by engine version {self.__engine_version}")
self.response.clear()
request = bytes([info_code])
if info_code == DbInfoCode.PAGE_CONTENTS:
request += (4).to_bytes(2, 'little')
request += page_number.to_bytes(4, 'little')
if len(self.response.raw) < self.page_size + 10:
self.response.resize(self.page_size + 10)
self._get_data(request)
tag = self.response.get_tag()
if (request[0] != tag):
if info_code == DbInfoCode.ACTIVE_TRANSACTIONS:
# isc_info_active_transactions with no active transactions returns empty buffer
# and does not follow this rule
pass
elif tag == isc_info_error: # pragma: no cover
raise InterfaceError("An error response was received")
else: # pragma: no cover
raise InterfaceError("Result code does not match request code")
if info_code == DbInfoCode.ACTIVE_TRANSACTIONS:
# we'll rewind back, otherwise it will break the repeating cluster processing
self.response.rewind()
#
result = self._handlers[info_code]()
# cache
if info_code in (DbInfoCode.CREATION_DATE, DbInfoCode.DB_CLASS, DbInfoCode.DB_PROVIDER,
DbInfoCode.DB_SQL_DIALECT, DbInfoCode.ODS_MINOR_VERSION,
DbInfoCode.ODS_VERSION, DbInfoCode.PAGE_SIZE, DbInfoCode.VERSION,
DbInfoCode.FIREBIRD_VERSION, DbInfoCode.IMPLEMENTATION_OLD,
DbInfoCode.IMPLEMENTATION, DbInfoCode.DB_ID, DbInfoCode.BASE_LEVEL,
DbInfoCode.ATTACHMENT_ID):
self._cache[info_code] = result
return result
# Functions
def get_page_content(self, page_number: int) -> bytes:
"""Returns content of single database page.
Arguments:
page_number: Sequence number of database page to be fetched from server.
"""
return self.get_info(DbInfoCode.PAGE_CONTENTS, page_number)
def get_active_transaction_ids(self) -> List[int]:
"""Returns list of IDs of active transactions.
"""
return self.get_info(DbInfoCode.ACTIVE_TRANSACTIONS)
def get_active_transaction_count(self) -> int:
"""Returns number of active transactions.
"""
return self.get_info(DbInfoCode.ACTIVE_TRAN_COUNT)
def get_table_access_stats(self) -> List[TableAccessStats]:
"""Returns actual table access statistics.
"""
tables = {}
info_codes = [DbInfoCode.READ_SEQ_COUNT, DbInfoCode.READ_IDX_COUNT,
DbInfoCode.INSERT_COUNT, DbInfoCode.UPDATE_COUNT,
DbInfoCode.DELETE_COUNT, DbInfoCode.BACKOUT_COUNT,
DbInfoCode.PURGE_COUNT, DbInfoCode.EXPUNGE_COUNT]
#stats = self.get_info(info_codes)
for info_code in info_codes:
stat: Mapping = self.get_info(info_code)
for table, count in stat.items():
tables.setdefault(table, dict.fromkeys(info_codes))[info_code] = count
return [TableAccessStats(table, **{_i2name[code]:count
for code, count in tables[table].items()})
for table in tables]
def is_compressed(self) -> bool:
"""Returns True if connection to the server uses data compression.
"""
return ConnectionFlag.COMPRESSED in ConnectionFlag(self.get_info(DbInfoCode.CONN_FLAGS))
def is_encrypted(self) -> bool:
"""Returns True if connection to the server uses data encryption.
"""
return ConnectionFlag.ENCRYPTED in ConnectionFlag(self.get_info(DbInfoCode.CONN_FLAGS))
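# Comment-only usage sketch: the convenience functions above, read through the info
# provider bound to a connection (`con` is an assumed open Connection).
#
#   print(con.info.get_active_transaction_ids())
#   for stat in con.info.get_table_access_stats():
#       print(stat)
#   print(con.info.is_encrypted(), con.info.is_compressed())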
# Properties
@property
def id(self) -> int:
"""Attachment ID.
"""
return self.get_info(DbInfoCode.ATTACHMENT_ID)
@property
def charset(self) -> str:
"""Database character set.
"""
if -1 not in self._cache:
with transaction(self._con()._tra_qry, bypass=True):
with self._con()._ic.execute("SELECT RDB$CHARACTER_SET_NAME FROM RDB$DATABASE"):
self._cache[-1] = self._con()._ic.fetchone()[0].strip()
return self._cache[-1]
@property
def page_size(self) -> int:
"""Page size (in bytes).
"""
return self.__page_size
@property
def sql_dialect(self) -> int:
"""SQL dialect used by connected database.
"""
return self.get_info(DbInfoCode.DB_SQL_DIALECT)
@property
def name(self) -> str:
"""Database name (filename or alias).
"""
return self.get_info(DbInfoCode.DB_ID)[0]
@property
def site(self) -> str:
"""Database site name.
"""
return self.get_info(DbInfoCode.DB_ID)[1]
@property
def server_version(self) -> str:
"""Firebird server version (compatible with InterBase version).
"""
return self.get_info(DbInfoCode.VERSION)
@property
def firebird_version(self) -> str:
"""Firebird server version.
"""
return self.get_info(DbInfoCode.FIREBIRD_VERSION)
@property
def implementation(self) -> Implementation:
"""Implementation (old format).
"""
return Implementation(self.get_info(DbInfoCode.IMPLEMENTATION_OLD)[0])
@property
def provider(self) -> DbProvider:
"""Database Provider.
"""
return DbProvider(self.get_info(DbInfoCode.DB_PROVIDER))
@property
def db_class(self) -> DbClass:
"""Database Class.
"""
return DbClass(self.get_info(DbInfoCode.DB_CLASS))
@property
def creation_date(self) -> datetime.date:
"""Date when database was created.
"""
return self.get_info(DbInfoCode.CREATION_DATE)
@property
def ods(self) -> float:
"""Database On-Disk Structure version (<major>.<minor>).
"""
return float(f'{self.ods_version}.{self.ods_minor_version}')
@property
def ods_version(self) -> int:
"""Database On-Disk Structure MAJOR version.
"""
return self.get_info(DbInfoCode.ODS_VERSION)
@property
def ods_minor_version(self) -> int:
"""Database On-Disk Structure MINOR version.
"""
return self.get_info(DbInfoCode.ODS_MINOR_VERSION)
@property
def page_cache_size(self) -> int:
"""Size of page cache used by connection.
"""
return self.get_info(DbInfoCode.NUM_BUFFERS)
@property
def pages_allocated(self) -> int:
"""Number of pages allocated for database.
"""
return self.get_info(DbInfoCode.ALLOCATION)
@property
def size_in_pages(self) -> int:
"""Database size in pages.
"""
return self.get_info(DbInfoCode.DB_SIZE_IN_PAGES)
@property
def pages_used(self) -> int:
"""Number of database pages in active use.
"""
return self.get_info(DbInfoCode.PAGES_USED)
@property
def pages_free(self) -> int:
"""Number of free allocated pages in database.
"""
return self.get_info(DbInfoCode.PAGES_FREE)
@property
def sweep_interval(self) -> int:
"""Sweep interval.
"""
return self.get_info(DbInfoCode.SWEEP_INTERVAL)
@property
def space_reservation(self) -> DbSpaceReservation:
"""Data page space usage (USE_FULL or RESERVE).
"""
return DbSpaceReservation.USE_FULL if self.get_info(DbInfoCode.NO_RESERVE) else DbSpaceReservation.RESERVE
@property
def write_mode(self) -> DbWriteMode:
"""Database write mode (SYNC or ASYNC).
"""
return DbWriteMode.SYNC if self.get_info(DbInfoCode.FORCED_WRITES) else DbWriteMode.ASYNC
@property
def access_mode(self) -> DbAccessMode:
"""Database access mode (READ_ONLY or READ_WRITE).
"""
return DbAccessMode.READ_ONLY if self.get_info(DbInfoCode.DB_READ_ONLY) else DbAccessMode.READ_WRITE
@property
def reads(self) -> int:
"""Current I/O statistics - Reads from disk to page cache.
"""
return self.get_info(DbInfoCode.READS)
@property
def fetches(self) -> int:
"""Current I/O statistics - Fetches from page cache.
"""
return self.get_info(DbInfoCode.FETCHES)
@property
def cache_hit_ratio(self) -> float:
"""Cache hit ratio = 1 - (reads / fetches).
"""
return 1 - (self.reads / self.fetches)
@property
def writes(self) -> int:
"""Current I/O statistics - Writes from page cache to disk.
"""
return self.get_info(DbInfoCode.WRITES)
@property
def marks(self) -> int:
"""Current I/O statistics - Writes to page in cache.
"""
return self.get_info(DbInfoCode.MARKS)
@property
def current_memory(self) -> int:
"""Total amount of memory curretly used by database engine.
"""
return self.get_info(DbInfoCode.CURRENT_MEMORY)
@property
def max_memory(self) -> int:
"""Max. total amount of memory so far used by database engine.
"""
return self.get_info(DbInfoCode.MAX_MEMORY)
@property
def oit(self) -> int:
"""ID of Oldest Interesting Transaction.
"""
return self.get_info(DbInfoCode.OLDEST_TRANSACTION)
@property
def oat(self) -> int:
"""ID of Oldest Active Transaction.
"""
return self.get_info(DbInfoCode.OLDEST_ACTIVE)
@property
def ost(self) -> int:
"""ID of Oldest Snapshot Transaction.
"""
return self.get_info(DbInfoCode.OLDEST_SNAPSHOT)
@property
def next_transaction(self) -> int:
"""ID for next transaction.
"""
return self.get_info(DbInfoCode.NEXT_TRANSACTION)
@property
def version(self) -> str:
"""Firebird version as SEMVER string.
"""
return self.__version
@property
def engine_version(self) -> float:
"""Firebird version as <major>.<minor> float number.
"""
return self.__engine_version
class DatabaseInfoProvider(DatabaseInfoProvider3):
"""Provides access to information about attached database [Firebird 4+].
Important:
Do NOT create instances of this class directly! Use `Connection.info` property to
access the instance already bound to attached database.
"""
def __init__(self, connection: Connection):
super().__init__(connection)
self._handlers.update({
DbInfoCode.SES_IDLE_TIMEOUT_DB: self.response.read_sized_int,
DbInfoCode.SES_IDLE_TIMEOUT_ATT: self.response.read_sized_int,
DbInfoCode.SES_IDLE_TIMEOUT_RUN: self.response.read_sized_int,
DbInfoCode.STMT_TIMEOUT_DB: self.response.read_sized_int,
DbInfoCode.STMT_TIMEOUT_ATT: self.response.read_sized_int,
DbInfoCode.PROTOCOL_VERSION: self.response.read_sized_int,
DbInfoCode.CRYPT_PLUGIN: self._info_string,
DbInfoCode.CREATION_TIMESTAMP_TZ: self.__creation_tstz,
DbInfoCode.WIRE_CRYPT: self._info_string,
DbInfoCode.FEATURES: self.__features,
DbInfoCode.NEXT_ATTACHMENT: self.response.read_sized_int,
DbInfoCode.NEXT_STATEMENT: self.response.read_sized_int,
DbInfoCode.DB_GUID: self._info_string,
DbInfoCode.DB_FILE_ID: self._info_string,
DbInfoCode.REPLICA_MODE: self.__replica_mode,
})
def __creation_tstz(self) -> datetime.datetime:
value = self.response.read_bytes()
return _util.decode_timestamp_tz(value)
def __features(self) -> List[Features]:
value = self.response.read_bytes()
return [Features(x) for x in value]
def __replica_mode(self) -> ReplicaMode:
return ReplicaMode(self.response.read_sized_int())
@property
def idle_timeout(self) -> int:
"""Attachment idle timeout.
"""
return self._con()._att.get_idle_timeout()
@idle_timeout.setter
def idle_timeout(self, value: int) -> None:
self._con()._att.set_idle_timeout(value)
@property
def statement_timeout(self) -> int:
"""Statement timeout.
"""
return self._con()._att.get_statement_timeout()
@statement_timeout.setter
def statement_timeout(self, value: int) -> None:
self._con()._att.set_statement_timeout(value)
class Connection(LoggingIdMixin):
"""Connection to the database.
Note:
Implements context manager protocol to call `.close()` automatically.
Attributes:
default_tpb (bytes): Default Transaction parameter buffer for started transactions.
Default is set to SNAPSHOT isolation with WAIT lock resolution (infinite lock timeout).
"""
# PEP 249 (Python DB API 2.0) extension
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
def __init__(self, att: iAttachment, dsn: str, dpb: bytes=None, sql_dialect: int=3,
charset: str=None) -> None:
self._att: iAttachment = att
self.__str: str = f'Connection[{self._get_db_handle()}]'
self.__charset: str = charset
self.__precision_cache = {}
self.__sqlsubtype_cache = {}
self.__ecollectors: List[EventCollector] = []
self.__dsn: str = dsn
self.__sql_dialect: int = sql_dialect
self._encoding: str = CHARSET_MAP.get(charset, 'ascii')
self._att.encoding = self._encoding
self._dpb: bytes = dpb
#: Default TPB for newly created transaction managers
self.default_tpb: bytes = tpb(Isolation.SNAPSHOT)
self._transactions: List[TransactionManager] = []
self._statements: List[Statement] = []
#
self.__ev: float = None
self.__info: DatabaseInfoProvider = None
self._tra_main: TransactionManager = TransactionManager(self, self.default_tpb)
self._tra_main._logging_id_ = 'Transaction.Main'
self._tra_qry: TransactionManager = TransactionManager(self,
tpb(Isolation.READ_COMMITTED_RECORD_VERSION,
access_mode=TraAccessMode.READ))
self._tra_qry._logging_id_ = 'Transaction.Query'
# Cursor for internal use
self._ic = self.query_transaction.cursor()
self._ic._connection = weakref.proxy(self, self._ic._dead_con)
self._ic._logging_id_ = 'Cursor.internal'
# firebird.lib extensions
self.__schema = None
self.__monitor = None
self.__FIREBIRD_LIB__ = None
def __del__(self):
if not self.is_closed():
warn(f"Connection '{self.logging_id}' disposed without prior close()", ResourceWarning)
self._close()
self._close_internals()
self._att.detach()
def __enter__(self) -> Connection:
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def __repr__(self):
return self.__str
def _get_db_handle(self) -> int:
isc_status = a.ISC_STATUS_ARRAY()
db_handle = a.FB_API_HANDLE(0)
api = a.get_api()
api.fb_get_database_handle(isc_status, db_handle, self._att)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._unpack_output:fb_get_database_handle()")
return db_handle.value
def __stmt_deleted(self, stmt) -> None:
self._statements.remove(stmt)
def _close(self) -> None:
if self.__schema is not None:
self.__schema._set_internal(False)
self.__schema.close()
if self.__monitor is not None:
self.__monitor._set_internal(False)
self.__monitor.close()
self._ic.close()
for collector in self.__ecollectors:
collector.close()
self.main_transaction._finish(DefaultAction.ROLLBACK)
self.query_transaction._finish(DefaultAction.ROLLBACK)
while self._transactions:
transaction = self._transactions.pop(0)
transaction.default_action = DefaultAction.ROLLBACK # Required by Python DB API 2.0
transaction.close()
while self._statements:
s = self._statements.pop()()
if s is not None:
s.free()
def _close_internals(self) -> None:
self.main_transaction.close()
self.query_transaction.close()
if self.__info is not None:
self.__info._close()
def _engine_version(self) -> float:
if self.__ev is None:
self.__ev = _engine_version_provider.get_engine_version(weakref.ref(self))
return self.__ev
def _prepare(self, sql: str, transaction: TransactionManager) -> Statement:
if _commit := not transaction.is_active():
transaction.begin()
stmt = self._att.prepare(transaction._tra, sql, self.__sql_dialect)
result = Statement(self, stmt, sql, self.__sql_dialect)
self._statements.append(weakref.ref(result, self.__stmt_deleted))
if _commit:
transaction.commit()
return result
def _determine_field_precision(self, meta: ItemMetadata) -> int:
if (not meta.relation) or (not meta.field):
# Either the field name or the relation name (or both) is not provided,
# so we cannot determine the field precision. This is a normal situation,
# for example for queries with dynamically computed fields.
return 0
# Special case for automatic RDB$DB_KEY fields.
if (meta.field in ['DB_KEY', 'RDB$DB_KEY']):
return 0
precision = self.__precision_cache.get((meta.relation, meta.field))
if precision is not None:
return precision
# First, try table
with transaction(self._tra_qry, bypass=True):
with self._ic.execute("SELECT FIELD_SPEC.RDB$FIELD_PRECISION"
" FROM RDB$FIELDS FIELD_SPEC,"
" RDB$RELATION_FIELDS REL_FIELDS"
" WHERE"
" FIELD_SPEC.RDB$FIELD_NAME ="
" REL_FIELDS.RDB$FIELD_SOURCE"
" AND REL_FIELDS.RDB$RELATION_NAME = ?"
" AND REL_FIELDS.RDB$FIELD_NAME = ?",
(meta.relation, meta.field)):
result = self._ic.fetchone()
if result is None:
# Next, try stored procedure output parameter
with self._ic.execute("SELECT FIELD_SPEC.RDB$FIELD_PRECISION"
" FROM RDB$FIELDS FIELD_SPEC,"
" RDB$PROCEDURE_PARAMETERS REL_FIELDS"
" WHERE"
" FIELD_SPEC.RDB$FIELD_NAME ="
" REL_FIELDS.RDB$FIELD_SOURCE"
" AND RDB$PROCEDURE_NAME = ?"
" AND RDB$PARAMETER_NAME = ?"
" AND RDB$PARAMETER_TYPE = 1",
(meta.relation, meta.field)):
result = self._ic.fetchone()
if result:
self.__precision_cache[(meta.relation, meta.field)] = result[0]
return result[0]
# We ran out of options
return 0
def _get_array_sqlsubtype(self, relation: bytes, column: bytes) -> Optional[int]:
subtype = self.__sqlsubtype_cache.get((relation, column))
if subtype is not None:
return subtype
with transaction(self._tra_qry, bypass=True):
with self._ic.execute("SELECT FIELD_SPEC.RDB$FIELD_SUB_TYPE"
" FROM RDB$FIELDS FIELD_SPEC, RDB$RELATION_FIELDS REL_FIELDS"
" WHERE"
" FIELD_SPEC.RDB$FIELD_NAME = REL_FIELDS.RDB$FIELD_SOURCE"
" AND REL_FIELDS.RDB$RELATION_NAME = ?"
" AND REL_FIELDS.RDB$FIELD_NAME = ?",
(relation, column)):
result = self._ic.fetchone()
if result:
self.__sqlsubtype_cache[(relation, column)] = result[0]
return result[0]
def drop_database(self) -> None:
"""Drops the connected database.
Note:
Closes all event collectors, transaction managers (with rollback) and statements
associated with this connection before attempting to drop the database.
Hooks:
Event `.ConnectionHook.DROPPED`: Executed after the database is successfully dropped.
Hook must have signature::
hook_func(connection: Connection) -> None
Any value returned by hook is ignored.
"""
self._close()
self._close_internals()
try:
self._att.drop_database()
finally:
self._att = None
for hook in get_callbacks(ConnectionHook.DROPPED, self):
hook(self)
def execute_immediate(self, sql: str) -> None:
"""Executes SQL statement.
Important:
The statement MUST NOT return any result. The statement is executed in the
context of `.main_transaction`.
Arguments:
sql: SQL statement to be executed.
"""
assert self._att is not None
self.main_transaction.execute_immediate(sql)
def event_collector(self, event_names: Sequence[str]) -> EventCollector:
"""Create new `EventCollector` instance for this connection.
Arguments:
event_names: Sequence of database event names to which the collector should be subscribed.
"""
isc_status = a.ISC_STATUS_ARRAY()
db_handle = a.FB_API_HANDLE(0)
a.api.fb_get_database_handle(isc_status, db_handle, self._att)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Connection.get_events:fb_get_database_handle()")
conduit = EventCollector(db_handle, event_names)
self.__ecollectors.append(conduit)
return conduit
def close(self) -> None:
"""Close the connection and release all associated resources.
Closes all event collectors, transaction managers (with rollback) and statements
associated with this connection before attempting (see Hooks) to close the
connection itself.
Hooks:
Event `.ConnectionHook.DETACH_REQUEST`: Executed before connection
is closed. Hook must have signature::
hook_func(connection: Connection) -> bool
.. note::
If any hook function returns True, connection is NOT closed.
Event `.ConnectionHook.CLOSED`: Executed after connection is closed.
Hook must have signature::
hook_func(connection: Connection) -> None
Any value returned by hook is ignored.
Important:
Closed connection SHALL NOT be used anymore.
"""
if not self.is_closed():
retain = False
try:
self._close()
except DatabaseError:
self._att = None
raise
for hook in get_callbacks(ConnectionHook.DETACH_REQUEST, self):
ret = hook(self)
if ret and not retain:
retain = True
#
if not retain:
try:
self._close_internals()
self._att.detach()
finally:
self._att = None
for hook in get_callbacks(ConnectionHook.CLOSED, self):
hook(self)
def transaction_manager(self, default_tpb: bytes=None,
default_action: DefaultAction=DefaultAction.COMMIT) -> TransactionManager:
"""Create new `TransactionManager` instance for this connection.
Arguments:
default_tpb: Default Transaction parameter buffer.
default_action: Default action to be performed on implicit transaction end.
"""
assert self._att is not None
transaction = TransactionManager(self, default_tpb if default_tpb else self.default_tpb,
default_action)
self._transactions.append(transaction)
return transaction
def begin(self, tpb: bytes=None) -> None:
"""Starts new transaction managed by `.main_transaction`.
Arguments:
tpb: Transaction parameter buffer with transaction parameters. If not specified,
the `.default_tpb` is used.
"""
assert self._att is not None
self.main_transaction.begin(tpb)
def savepoint(self, name: str) -> None:
"""Creates a new savepoint for transaction managed by `.main_transaction`.
Arguments:
name: Name for the savepoint
"""
assert self._att is not None
return self.main_transaction.savepoint(name)
def commit(self, *, retaining: bool=False) -> None:
"""Commits the transaction managed by `.main_transaction`.
Arguments:
retaining: When True, the transaction context is retained after commit.
"""
assert self._att is not None
self.main_transaction.commit(retaining=retaining)
def rollback(self, *, retaining: bool=False, savepoint: str=None) -> None:
"""Rolls back the transaction managed by `.main_transaction`.
Arguments:
retaining: When True, the transaction context is retained after rollback.
savepoint: When specified, the transaction is rolled back to savepoint with given name.
"""
assert self._att is not None
self.main_transaction.rollback(retaining=retaining, savepoint=savepoint)
def cursor(self) -> Cursor:
"""Returns new `Cursor` instance associated with `.main_transaction`.
"""
assert self._att is not None
return self.main_transaction.cursor()
def ping(self) -> None:
"""Checks connection status. If test fails the only operation possible
with connection is to close it.
Raises:
DatabaseError: When connection is dead.
"""
assert self._att is not None
self._att.ping()
def is_active(self) -> bool:
"""Returns True if `.main_transaction` has active transaction.
"""
return self._tra_main.is_active()
def is_closed(self) -> bool:
"""Returns True if connection to the database is closed.
Important:
Closed connection SHALL NOT be used anymore.
"""
return self._att is None
@property
def dsn(self) -> str:
"""Connection string.
"""
return self.__dsn
@property
def info(self) -> Union[DatabaseInfoProvider3, DatabaseInfoProvider]:
"""Access to various information about attached database.
"""
if self.__info is None:
self.__info = DatabaseInfoProvider(self) if self._engine_version() >= 4.0 \
else DatabaseInfoProvider3(self)
return self.__info
@property
def charset(self) -> str:
"""Connection character set.
"""
return self.__charset
@property
def sql_dialect(self) -> int:
"""Connection SQL dialect.
"""
return self.__sql_dialect
@property
def main_transaction(self) -> TransactionManager:
"""Main transaction manager for this connection.
"""
return self._tra_main
@property
def query_transaction(self) -> TransactionManager:
"""Transaction manager for Read-committed Read-only query transactions.
"""
return self._tra_qry
@property
def transactions(self) -> List[TransactionManager]:
"""List of all transaction managers associated with connection.
Note:
The first two are always `.main_transaction` and `.query_transaction` managers.
"""
result = [self.main_transaction, self.query_transaction]
result.extend(self._transactions)
return result
@property
def schema(self) -> 'firebird.lib.schema.Schema':
"""Access to database schema. Requires firebird.lib package.
"""
if self.__schema is None:
import firebird.lib.schema
self.__schema = firebird.lib.schema.Schema()
self.__schema.bind(self)
self.__schema._set_internal(True)
return self.__schema
@property
def monitor(self) -> 'firebird.lib.monitor.Monitor':
"""Access to database monitoring tables. Requires firebird.lib package.
"""
if self.__monitor is None:
import firebird.lib.monitor
self.__monitor = firebird.lib.monitor.Monitor(self)
self.__monitor._set_internal(True)
return self.__monitor
def tpb(isolation: Isolation, lock_timeout: int=-1, access_mode: TraAccessMode=TraAccessMode.WRITE) -> bytes:
"""Helper function to costruct simple TPB.
Arguments:
isolation: Isolation level.
lock_timeout: Lock timeout (-1 = Infinity)
access_mode: Access mode.
"""
return TPB(isolation=isolation, lock_timeout=lock_timeout, access_mode=access_mode).get_buffer()
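# Comment-only sketch: building a custom TPB with the helper above and using it for
# an explicit transaction manager (`con` is an assumed open Connection).
#
#   custom_tpb = tpb(Isolation.READ_COMMITTED_RECORD_VERSION, lock_timeout=5,
#                    access_mode=TraAccessMode.READ)
#   tm = con.transaction_manager(default_tpb=custom_tpb)
#   with tm.cursor() as cur:
#       cur.execute('SELECT 1 FROM RDB$DATABASE')
#       print(cur.fetchone())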
def _connect_helper(dsn: str, host: str, port: str, database: str, protocol: NetProtocol) -> str:
if ((not dsn and not host and not database) or
(dsn and (host or database)) or
(host and not database)):
raise InterfaceError("Must supply one of:\n"
" 1. keyword argument dsn='host:/path/to/database'\n"
" 2. both keyword arguments host='host' and"
" database='/path/to/database'\n"
" 3. only keyword argument database='/path/to/database'")
if not dsn:
if protocol is not None:
dsn = f'{protocol.name.lower()}://'
if host and port:
dsn += f'{host}:{port}/'
elif host:
dsn += f'{host}/'
else:
dsn = ''
if host and host.startswith('\\\\'): # Windows Named Pipes
if port:
dsn += f'{host}@{port}\\'
else:
dsn += f'{host}\\'
elif host and port:
dsn += f'{host}/{port}:'
elif host:
dsn += f'{host}:'
dsn += database
return dsn
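# A few DSN forms produced by _connect_helper, for reference (comment-only; host,
# port and database path are made-up values, NetProtocol.INET is used as the example protocol):
#
#   _connect_helper(None, 'srv', None, '/data/test.fdb', None)    -> 'srv:/data/test.fdb'
#   _connect_helper(None, 'srv', '3051', '/data/test.fdb', None)  -> 'srv/3051:/data/test.fdb'
#   _connect_helper(None, 'srv', '3051', '/data/test.fdb',
#                   NetProtocol.INET)                             -> 'inet://srv:3051//data/test.fdb'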
def __make_connection(create: bool, dsn: str, utf8filename: bool, dpb: bytes,
sql_dialect: int, charset: str,
crypt_callback: iCryptKeyCallbackImpl) -> Connection:
with a.get_api().master.get_dispatcher() as provider:
if crypt_callback is not None:
provider.set_dbcrypt_callback(crypt_callback)
if create:
att = provider.create_database(dsn, dpb, 'utf-8' if utf8filename else FS_ENCODING)
con = Connection(att, dsn, dpb, sql_dialect, charset)
else:
con = None
for hook in get_callbacks(ConnectionHook.ATTACH_REQUEST, Connection):
try:
con = hook(dsn, dpb)
except Exception as e:
raise InterfaceError("Error in DATABASE_ATTACH_REQUEST hook.", *e.args) from e
if con is not None:
break
if con is None:
att = provider.attach_database(dsn, dpb, 'utf-8' if utf8filename else FS_ENCODING)
con = Connection(att, dsn, dpb, sql_dialect, charset)
for hook in get_callbacks(ConnectionHook.ATTACHED, con):
hook(con)
return con
def connect(database: str, *, user: str=None, password: str=None, role: str=None,
no_gc: bool=None, no_db_triggers: bool=None, dbkey_scope: DBKeyScope=None,
crypt_callback: iCryptKeyCallbackImpl=None, charset: str=None,
auth_plugin_list: str=None, session_time_zone: str=None) -> Connection:
"""Establishes a connection to the database.
Arguments:
database: DSN or Database configuration name.
user: User name.
password: User password.
role: User role.
no_gc: Do not perform garbage collection for this connection.
no_db_triggers: Do not execute database triggers for this connection.
dbkey_scope: DBKEY scope override for connection.
crypt_callback: Callback that provides encryption key for the database.
charset: Character set for connection.
auth_plugin_list: Override for the list of authentication plugins.
session_time_zone: Session time zone [Firebird 4].
Hooks:
Event `.ConnectionHook.ATTACH_REQUEST`: Executed after all parameters
are preprocessed and before `Connection` is created. Hook
must have signature::
hook_func(dsn: str, dpb: bytes) -> Optional[Connection]
Hook may return `Connection` instance or None.
First instance returned by any hook will become the return value
of this function and other hooks are not called.
Event `.ConnectionHook.ATTACHED`: Executed before `Connection` instance is
returned. Hook must have signature::
hook_func(connection: Connection) -> None
Any value returned by hook is ignored.
"""
db_config = driver_config.get_database(database)
if db_config is None:
db_config = driver_config.db_defaults
else:
database = db_config.database.value
if db_config.server.value is None:
srv_config = driver_config.server_defaults
else:
srv_config = driver_config.get_server(db_config.server.value)
if srv_config is None:
raise ValueError(f"Configuration for server '{db_config.server.value}' not found")
if user is None:
user = db_config.user.value
if user is None:
user = srv_config.user.value
if password is None:
password = db_config.password.value
if password is None:
password = srv_config.password.value
if role is None:
role = db_config.role.value
if charset is None:
charset = db_config.charset.value
if charset:
charset = charset.upper()
if auth_plugin_list is None:
auth_plugin_list = db_config.auth_plugin_list.value
if session_time_zone is None:
session_time_zone = db_config.session_time_zone.value
dsn = _connect_helper(db_config.dsn.value, srv_config.host.value, srv_config.port.value,
database, db_config.protocol.value)
dpb = DPB(user=user, password=password, role=role, trusted_auth=db_config.trusted_auth.value,
sql_dialect=db_config.sql_dialect.value, timeout=db_config.timeout.value,
charset=charset, cache_size=db_config.cache_size.value,
no_linger=db_config.no_linger.value, utf8filename=db_config.utf8filename.value,
no_gc=no_gc, no_db_triggers=no_db_triggers, dbkey_scope=dbkey_scope,
dummy_packet_interval=db_config.dummy_packet_interval.value,
config=db_config.config.value, auth_plugin_list=auth_plugin_list,
session_time_zone=session_time_zone, set_bind=db_config.set_bind.value,
decfloat_round=db_config.decfloat_round.value,
decfloat_traps=db_config.decfloat_traps.value)
return __make_connection(False, dsn, db_config.utf8filename.value, dpb.get_buffer(),
db_config.sql_dialect.value, charset, crypt_callback)
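# Comment-only usage sketch for connect(); 'employee' stands for a registered database
# configuration name or DSN, and the credentials are placeholders.
#
#   with connect('employee', user='SYSDBA', password='masterkey') as con:
#       with con.cursor() as cur:
#           cur.execute('SELECT RDB$RELATION_NAME FROM RDB$RELATIONS')
#           for (name,) in cur:
#               print(name.strip())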
def create_database(database: str, *, user: str=None, password: str=None, role: str=None,
no_gc: bool=None, no_db_triggers: bool=None, dbkey_scope: DBKeyScope=None,
crypt_callback: iCryptKeyCallbackImpl=None, charset: str=None,
overwrite: bool=False, auth_plugin_list=None,
session_time_zone: str=None) -> Connection:
"""Creates new database.
Arguments:
database: DSN or Database configuration name.
user: User name.
password: User password.
role: User role.
no_gc: Do not perform garbage collection for this connection.
no_db_triggers: Do not execute database triggers for this connection.
dbkey_scope: DBKEY scope override for connection.
crypt_callback: Callback that provides encryption key for the database.
charset: Character set for connection.
overwrite: Overwrite the existing database.
auth_plugin_list: Override for the list of authentication plugins.
session_time_zone: Session time zone [Firebird 4].
Hooks:
Event `.ConnectionHook.ATTACHED`: Executed before `Connection` instance is
returned. Hook must have signature::
hook_func(connection: Connection) -> None
Any value returned by hook is ignored.
"""
db_config = driver_config.get_database(database)
if db_config is None:
db_config = driver_config.db_defaults
db_config.database.value = database
if db_config.server.value is None:
srv_config = driver_config.server_defaults
else:
srv_config = driver_config.get_server(db_config.server.value)
if srv_config is None:
raise ValueError(f"Configuration for server '{db_config.server.value}' not found")
else:
if db_config.server.value is None:
srv_config = driver_config.server_defaults
else:
srv_config = driver_config.get_server(db_config.server.value)
if srv_config is None:
raise ValueError(f"Configuration for server '{db_config.server.value}' not found")
if user is None:
user = db_config.user.value
if password is None:
password = db_config.password.value
if role is None:
role = db_config.role.value
if charset is None:
charset = db_config.charset.value
if charset:
charset = charset.upper()
if auth_plugin_list is None:
auth_plugin_list = db_config.auth_plugin_list.value
if session_time_zone is None:
session_time_zone = db_config.session_time_zone.value
dsn = _connect_helper(db_config.dsn.value, srv_config.host.value, srv_config.port.value,
db_config.database.value, db_config.protocol.value)
dpb = DPB(user=user, password=password, role=role, trusted_auth=db_config.trusted_auth.value,
sql_dialect=db_config.db_sql_dialect.value, timeout=db_config.timeout.value,
charset=charset, cache_size=db_config.cache_size.value,
no_linger=db_config.no_linger.value, utf8filename=db_config.utf8filename.value,
no_gc=no_gc, no_db_triggers=no_db_triggers, dbkey_scope=dbkey_scope,
dummy_packet_interval=db_config.dummy_packet_interval.value,
config=db_config.config.value, auth_plugin_list=auth_plugin_list,
session_time_zone=session_time_zone, set_bind=db_config.set_bind.value,
decfloat_round=db_config.decfloat_round.value,
decfloat_traps=db_config.decfloat_traps.value,
overwrite=overwrite, db_cache_size=db_config.db_cache_size.value,
forced_writes=db_config.forced_writes.value, page_size=db_config.page_size.value,
reserve_space=db_config.reserve_space.value, sweep_interval=db_config.sweep_interval.value,
db_sql_dialect=db_config.db_sql_dialect.value, db_charset=db_config.db_charset.value)
return __make_connection(True, dsn, db_config.utf8filename.value,
dpb.get_buffer(for_create=True), db_config.sql_dialect.value,
charset, crypt_callback)
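# Comment-only usage sketch for create_database(); the file path and credentials are
# placeholders.
#
#   con = create_database('/tmp/new.fdb', user='SYSDBA', password='masterkey',
#                         overwrite=True)
#   try:
#       print(con.info.page_size, con.info.creation_date)
#   finally:
#       con.close()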
class TransactionInfoProvider3(InfoProvider):
"""Provides access to information about transaction [Firebird 3+].
Important:
Do NOT create instances of this class directly! Use `TransactionManager.info`
property to access the instance already bound to transaction context.
"""
def __init__(self, charset: str, tra: TransactionManager):
super().__init__(charset)
self._mngr: Callable[[], TransactionManager] = weakref.ref(tra)
self._handlers: Dict[TraInfoCode, Callable] = \
{TraInfoCode.ISOLATION: self.__isolation,
TraInfoCode.ACCESS: self.__access,
TraInfoCode.DBPATH: self.response.read_sized_string,
TraInfoCode.LOCK_TIMEOUT: self.__lock_timeout,
TraInfoCode.ID: self.response.read_sized_int,
TraInfoCode.OLDEST_INTERESTING: self.response.read_sized_int,
TraInfoCode.OLDEST_SNAPSHOT: self.response.read_sized_int,
TraInfoCode.OLDEST_ACTIVE: self.response.read_sized_int,
}
def __isolation(self) -> Isolation:
cnt = self.response.read_short()
if cnt == 1:
# The value is `TraInfoIsolation` that maps to `Isolation`
return Isolation(self.response.read_byte())
else:
# The values are `TraInfoIsolation` + `TraInfoReadCommitted` that maps to `Isolation`
return Isolation(self.response.read_byte() + self.response.read_byte())
def __access(self) -> TraInfoAccess:
return TraInfoAccess(self.response.read_sized_int())
def __lock_timeout(self) -> int:
return self.response.read_sized_int(signed=True)
def _acquire(self, request: bytes) -> None:
assert self._mngr is not None
if not self._mngr().is_active():
raise InterfaceError("TransactionManager is not active")
self._mngr()._tra.get_info(request, self.response.raw)
def _close(self) -> None:
self._mngr = None
def get_info(self, info_code: TraInfoCode) -> Any:
if info_code not in self._handlers:
raise NotSupportedError(f"Info code {info_code} not supported by engine version {self._mngr()._connection()._engine_version()}")
request = bytes([info_code])
self._get_data(request)
tag = self.response.get_tag()
if (request[0] != tag):
if tag == isc_info_error: # pragma: no cover
raise InterfaceError("An error response was received")
else: # pragma: no cover
raise InterfaceError("Result code does not match request code")
#
return self._handlers[info_code]()
# Functions
def is_read_only(self) -> bool:
"""Returns True if transaction is Read Only.
"""
return self.get_info(TraInfoCode.ACCESS) == TraInfoAccess.READ_ONLY
# Properties
@property
def id(self) -> int:
"""Transaction ID.
"""
return self.get_info(TraInfoCode.ID)
@property
def oit(self) -> int:
"""ID of Oldest Interesting Transaction at the time this transaction started.
"""
return self.get_info(TraInfoCode.OLDEST_INTERESTING)
@property
def oat(self) -> int:
"""ID of Oldest Active Transaction at the time this transaction started.
"""
return self.get_info(TraInfoCode.OLDEST_ACTIVE)
@property
def ost(self) -> int:
"""ID of Oldest Snapshot Transaction at the time this transaction started.
"""
return self.get_info(TraInfoCode.OLDEST_SNAPSHOT)
@property
def isolation(self) -> Isolation:
"""Isolation level.
"""
return self.get_info(TraInfoCode.ISOLATION)
@property
def lock_timeout(self) -> int:
"""Lock timeout.
"""
return self.get_info(TraInfoCode.LOCK_TIMEOUT)
@property
def database(self) -> str:
"""Database filename.
"""
return self.get_info(TraInfoCode.DBPATH)
@property
def snapshot_number(self) -> int:
"""Snapshot number for this transaction.
Raises:
NotSupportedError: Requires Firebird 4+
"""
self._raise_not_supported()
class TransactionInfoProvider(TransactionInfoProvider3):
"""Provides access to information about transaction [Firebird 4+].
Important:
Do NOT create instances of this class directly! Use `TransactionManager.info`
property to access the instance already bound to transaction context.
"""
def __init__(self, charset: str, tra: TransactionManager):
super().__init__(charset, tra)
self._handlers.update({TraInfoCode.SNAPSHOT_NUMBER: self.response.read_sized_int,})
@property
def snapshot_number(self) -> int:
"""Snapshot number for this transaction.
"""
return self.get_info(TraInfoCode.SNAPSHOT_NUMBER)
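# Comment-only sketch: inspecting an active transaction through its info provider
# (`con` is an assumed open Connection).
#
#   con.begin()
#   info = con.main_transaction.info
#   print(info.id, info.isolation, info.lock_timeout)
#   con.commit()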
class TransactionManager(LoggingIdMixin):
"""Transaction manager.
Note:
Implements context manager protocol to call `.close()` automatically.
Attributes:
default_tpb (bytes): Default Transaction parameter buffer.
default_action (DefaultAction): Default action for implicit transaction end.
"""
def __init__(self, connection: Connection, default_tpb: bytes,
default_action: DefaultAction=DefaultAction.COMMIT):
self._connection: Callable[[], Connection] = weakref.ref(connection, self.__dead_con)
#: Default Transaction Parameter Block used to start transaction
self.default_tpb: bytes = default_tpb
#: Default action (commit/rollback) to be performed when transaction is closed.
self.default_action: DefaultAction = default_action
self.__info: Union[TransactionInfoProvider, TransactionInfoProvider3] = None
self._cursors: List = [] # Weak references to cursors
self._tra: iTransaction = None
self.__closed: bool = False
self._logging_id_ = 'Transaction'
def __enter__(self) -> TransactionManager:
self.begin()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def __del__(self):
if self._tra is not None:
warn(f"Transaction '{self.logging_id}' disposed while active", ResourceWarning)
self._finish()
def __dead_con(self, obj) -> None:
self._connection = None
def _close_cursors(self) -> None:
for cursor in self._cursors:
c = cursor()
if c:
c.close()
def _cursor_deleted(self, obj) -> None:
self._cursors.remove(obj)
def _finish(self, default_action: DefaultAction=None) -> None:
try:
if self._tra is not None:
if default_action is None:
default_action = self.default_action
if default_action == DefaultAction.COMMIT:
self.commit()
else:
self.rollback()
finally:
self._tra = None
def close(self) -> None:
"""Close the transaction manager and release all associated resources.
Important:
Closed instance SHALL NOT be used anymore.
"""
if not self.__closed:
try:
self._finish()
finally:
con = self._connection()
if con is not None and self in con._transactions:
con._transactions.remove(self)
self._connection = None
self.__closed = True
if self.__info is not None:
self.__info._close()
def execute_immediate(self, sql: str) -> None:
"""Executes SQL statement. The statement MUST NOT return any result.
Arguments:
sql: SQL statement to be executed.
"""
assert not self.__closed
if not self.is_active():
self.begin()
self._connection()._att.execute(self._tra, sql, self._connection().sql_dialect)
def begin(self, tpb: bytes=None) -> None:
"""Starts new transaction managed by this instance.
Arguments:
tpb: Transaction parameter buffer with transaction's parameters. If not specified,
the `.default_tpb` is used.
"""
assert not self.__closed
self._finish() # Make sure that previous transaction (if any) is ended
self._tra = self._connection()._att.start_transaction(tpb if tpb else self.default_tpb)
def commit(self, *, retaining: bool=False) -> None:
"""Commits the transaction managed by this instance.
Arguments:
retaining: When True, the transaction context is retained after commit.
"""
assert not self.__closed
assert self.is_active()
if retaining:
self._tra.commit_retaining()
else:
self._close_cursors()
self._tra.commit()
if not retaining:
self._tra = None
def rollback(self, *, retaining: bool=False, savepoint: str=None) -> None:
"""Rolls back the transaction managed by this instance.
Arguments:
retaining: When True, the transaction context is retained after rollback.
savepoint: When specified, the transaction is rolled back to savepoint with given name.
Raises:
InterfaceError: When both retaining and savepoint parameters are specified.
"""
assert not self.__closed
assert self.is_active()
if retaining and savepoint:
raise InterfaceError("Can't rollback to savepoint while retaining context")
if savepoint:
self.execute_immediate(f'rollback to {savepoint}')
else:
if retaining:
self._tra.rollback_retaining()
else:
self._close_cursors()
self._tra.rollback()
if not retaining:
self._tra = None
def savepoint(self, name: str) -> None:
"""Creates a new savepoint for transaction managed by this instance.
Arguments:
name: Name for the savepoint
"""
self.execute_immediate(f'SAVEPOINT {name}')
def cursor(self) -> Cursor:
"""Returns new `Cursor` instance associated with this instance.
"""
assert not self.__closed
cur = Cursor(self._connection(), self)
self._cursors.append(weakref.ref(cur, self._cursor_deleted))
return cur
def is_active(self) -> bool:
"""Returns True if transaction is active.
"""
return self._tra is not None
def is_closed(self) -> bool:
"""Returns True if this transaction manager is closed.
"""
return self.__closed
# Properties
@property
def info(self) -> Union[TransactionInfoProvider3, TransactionInfoProvider]:
"""Access to various information about active transaction.
"""
if self.__info is None:
cls = TransactionInfoProvider if self._connection()._engine_version() >= 4.0 \
else TransactionInfoProvider3
self.__info = cls(self._connection()._encoding, self)
return self.__info
@property
def log_context(self) -> Connection:
if self._connection is None:
return 'Connection.GC'
return self._connection()
@property
def cursors(self) -> List[Cursor]:
"""Cursors associated with this transaction.
"""
return [x() for x in self._cursors]
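# Comment-only sketch of explicit transaction management with savepoints; `con` is an
# assumed open Connection and table T is a made-up example.
#
#   tm = con.transaction_manager(tpb(Isolation.READ_COMMITTED_RECORD_VERSION))
#   with tm:                              # begins a transaction, closes the manager on exit
#       cur = tm.cursor()
#       cur.execute('UPDATE T SET C = ?', (1,))
#       tm.savepoint('SP1')
#       tm.rollback(savepoint='SP1')
#       tm.commit()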
class DistributedTransactionManager(TransactionManager):
"""Manages distributed transaction over multiple connections that use two-phase
commit protocol.
Note:
Implements context manager protocol to call `.close()` automatically.
Attributes:
default_tpb (bytes): Default Transaction parameter buffer
default_action (DefaultAction): Default action for implicit transaction end
"""
def __init__(self, connections: Sequence[Connection], default_tpb: bytes=None,
default_action: DefaultAction=DefaultAction.COMMIT):
self._connections: List[Connection] = list(connections)
self.default_tpb: bytes = default_tpb if default_tpb is not None else tpb(Isolation.SNAPSHOT)
self.default_action: DefaultAction = default_action
self._cursors: List = [] # Weak references to cursors
self._tra: iTransaction = None
self._dtc: iDtc = _master.get_dtc()
self.__closed: bool = False
self._logging_id_ = 'DTransaction'
def close(self) -> None:
"""Close the distributed transaction manager and release all associated
resources.
Important:
Closed instance SHALL NOT be used anymore.
"""
if not self.__closed:
try:
self._finish()
finally:
self._connections.clear()
self.__closed = True
def execute_immediate(self, sql: str) -> None:
"""Executes SQL statement on all connections in distributed transaction.
The statement MUST NOT return any result.
Arguments:
sql: SQL statement to be executed.
"""
assert not self.__closed
if not self.is_active():
self.begin()
for connection in self._connections:
connection._att.execute(self._tra, sql, connection.sql_dialect)
def begin(self, tpb: bytes=None) -> None:
"""Starts new distributed transaction managed by this instance.
Arguments:
tpb: Transaction parameter buffer with transaction's parameters. If not specified,
the `.default_tpb` is used.
"""
assert not self.__closed
self._finish() # Make sure that previous transaction (if any) is ended
with self._dtc.start_builder() as builder:
for con in self._connections:
builder.add_with_tpb(con._att, tpb if tpb else self.default_tpb)
self._tra = builder.start()
def prepare(self) -> None:
"""Manually triggers the first phase of a two-phase commit (2PC).
Note:
Direct use of this method is optional; if preparation is not triggered
manually, it will be performed implicitly by `.commit()` in a 2PC.
"""
assert not self.__closed
assert self.is_active()
self._tra.prepare()
def commit(self, *, retaining: bool=False) -> None:
"""Commits the distributed transaction managed by this instance.
Arguments:
retaining: When True, the transaction context is retained after commit.
"""
assert not self.__closed
assert self.is_active()
if retaining:
self._tra.commit_retaining()
else:
self._close_cursors()
self._tra.commit()
if not retaining:
self._tra = None
def rollback(self, *, retaining: bool=False, savepoint: str=None) -> None:
"""Rolls back the distributed transaction managed by this instance.
Arguments:
retaining: When True, the transaction context is retained after rollback.
savepoint: When specified, the transaction is rolled back to savepoint with given name.
Raises:
InterfaceError: When both retaining and savepoint parameters are specified.
"""
assert not self.__closed
assert self.is_active()
if retaining and savepoint:
raise InterfaceError("Can't rollback to savepoint while retaining context")
if savepoint:
self.execute_immediate(f'rollback to {savepoint}')
else:
if retaining:
self._tra.rollback_retaining()
else:
self._close_cursors()
self._tra.rollback()
if not retaining:
self._tra = None
def savepoint(self, name: str) -> None:
"""Creates a new savepoint for distributed transaction managed by this instance.
Arguments:
name: Name for the savepoint
"""
self.execute_immediate(f'SAVEPOINT {name}')
def cursor(self, connection: Connection) -> Cursor:
"""Returns new `Cursor` instance associated with specified connection and
this distributed transaction manager.
Raises:
InterfaceError: When the specified connection is not associated with this
distributed transaction manager.
"""
assert not self.__closed
if connection not in self._connections:
raise InterfaceError("Cannot create cursor for connection that does "
"not belong to this distributed transaction")
cur = Cursor(connection, self)
self._cursors.append(weakref.ref(cur, self._cursor_deleted))
return cur
@property
def log_context(self) -> Connection:
return UNDEFINED
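# Comment-only sketch of a two-phase commit over two connections; `con1` and `con2`
# are assumed open Connection instances and the table names are made up.
#
#   dt = DistributedTransactionManager([con1, con2])
#   try:
#       dt.cursor(con1).execute('INSERT INTO LOG_A (MSG) VALUES (?)', ('hello',))
#       dt.cursor(con2).execute('INSERT INTO LOG_B (MSG) VALUES (?)', ('hello',))
#       dt.commit()                       # prepare() is performed implicitly as part of 2PC
#   finally:
#       dt.close()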
class Statement(LoggingIdMixin):
"""Prepared SQL statement.
Note:
Implements context manager protocol to call `.free()` automatically.
"""
def __init__(self, connection: Connection, stmt: iStatement, sql: str, dialect: int):
self._connection: Callable[[], Connection] = weakref.ref(connection, self.__dead_con)
self._dialect: int = dialect
self.__sql: str = sql
self._istmt: iStatement = stmt
self._type: StatementType = stmt.get_type()
self._flags: StatementFlag = stmt.get_flags()
self._desc: DESCRIPTION = None
# Input metadata
meta = stmt.get_input_metadata()
self._in_cnt: int = meta.get_count()
self._in_meta: iMessageMetadata = None
self._in_buffer: bytes = None
if self._in_cnt == 0:
meta.release()
else:
self._in_meta = meta
self._in_buffer = create_string_buffer(meta.get_message_length())
# Output metadata
meta = stmt.get_output_metadata()
self._out_meta: iMessageMetadata = None
self._out_cnt: int = meta.get_count()
self._out_buffer: bytes = None
self._out_desc: List[ItemMetadata] = None
if self._out_cnt == 0:
meta.release()
self._out_desc = []
else:
self._out_meta = meta
self._out_buffer = create_string_buffer(meta.get_message_length())
self._out_desc = create_meta_descriptors(meta)
def __enter__(self) -> Statement:
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.free()
def __del__(self):
if self._in_meta or self._out_meta or self._istmt:
warn(f"Statement '{self.logging_id}' disposed without prior free()", ResourceWarning)
self.free()
def __str__(self):
return f'{self.logging_id}[{self.sql}]'
def __repr__(self):
return str(self)
def __dead_con(self, obj) -> None:
self._connection = None
def __get_plan(self, detailed: bool) -> str:
assert self._istmt is not None
result = self._istmt.get_plan(detailed)
return result if result is None else result.strip()
def free(self) -> None:
"""Release the statement and all associated resources.
Important:
The statement SHALL NOT be used after call to this method.
"""
if self._in_meta is not None:
self._in_meta.release()
self._in_meta = None
if self._out_meta is not None:
self._out_meta.release()
self._out_meta = None
if self._istmt is not None:
self._istmt.free()
self._istmt = None
def has_cursor(self) -> bool:
"""Returns True if statement has cursor (can return multiple rows).
"""
assert self._istmt is not None
return StatementFlag.HAS_CURSOR in self._flags
def can_repeat(self) -> bool:
"""Returns True if statement could be executed repeatedly.
"""
assert self._istmt is not None
return StatementFlag.REPEAT_EXECUTE in self._flags
# Properties
@property
def log_context(self) -> Connection:
if self._connection is None:
return 'Connection.GC'
return self._connection()
@property
def plan(self) -> str:
"""Execution plan in classic format.
"""
return self.__get_plan(False)
@property
def detailed_plan(self) -> str:
"""Execution plan in new format (explained).
"""
return self.__get_plan(True)
@property
def sql(self) -> str:
"""SQL statement.
"""
return self.__sql
@property
def type(self) -> StatementType:
"""Statement type.
"""
return self._type
@property
def timeout(self) -> int:
"""Statement timeout.
"""
if self._connection()._engine_version() >= 4.0:
return self._istmt.get_timeout()
raise NotSupportedError(f"Statement timeout not supported by engine version {self._connection()._engine_version()}")
@timeout.setter
def timeout(self, value: int) -> None:
if self._connection()._engine_version() >= 4.0:
return self._istmt.set_timeout(value)
raise NotSupportedError(f"Statement timeout not supported by engine version {self._connection()._engine_version()}")
class BlobReader(io.IOBase, LoggingIdMixin):
"""Handler for large BLOB values returned by server.
The BlobReader is a “file-like” class, so it acts much like an open file instance.
Attributes:
sub_type (int): BLOB sub-type
newline (str): Sequence used as line terminator, default `'\\\\n'`
Note:
Implements context manager protocol to call `.close()` automatically.
"""
def __init__(self, blob: iBlob, blob_id: a.ISC_QUAD, sub_type: int,
length: int, segment_size: int, charset: str, owner: Any=None):
self._blob: iBlob = blob
self.newline: str = '\n'
self.sub_type: int = sub_type
self._owner: Any = weakref.ref(owner)
self._charset: str = charset
self._blob_length: int = length
self._segment_size: int = segment_size
self.__blob_id: a.ISC_QUAD = blob_id
self.__bytes_read = 0
self.__pos = 0
self.__index = 0
self.__buf = create_string_buffer(self._segment_size)
self.__buf_pos = 0
self.__buf_data = 0
def __next__(self):
line = self.readline()
if line:
return line
else:
raise StopIteration
def __iter__(self):
return self
def __reset_buffer(self) -> None:
memset(self.__buf, 0, self._segment_size)
self.__buf_pos = 0
self.__buf_data = 0
def __blob_get(self) -> None:
self.__reset_buffer()
# Load BLOB
bytes_actually_read = a.Cardinal(0)
self._blob.get_segment(self._segment_size, byref(self.__buf),
bytes_actually_read)
self.__buf_data = bytes_actually_read.value
def __enter__(self) -> BlobReader:
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def __del__(self):
if self._blob is not None:
warn(f"BlobReader '{self.logging_id}' disposed without prior close()", ResourceWarning)
self.close()
def __repr__(self):
return f'{self.logging_id}[size={self.length}]'
def flush(self) -> None:
"""Does nothing.
"""
pass
def close(self) -> None:
"""Close the BlobReader.
"""
if self._blob is not None:
self._blob.close()
self._blob = None
def read(self, size: int=-1) -> Union[str, bytes]:
"""Read at most size bytes from the file (less if the read hits EOF
before obtaining size bytes). If the size argument is negative or omitted,
read all data until EOF is reached. The bytes are returned as a string
object. An empty string is returned when EOF is encountered immediately.
Like `file.read()`.
Note:
Performs automatic conversion to `str` for TEXT BLOBs.
"""
assert self._blob is not None
if size >= 0:
to_read = min(size, self._blob_length - self.__pos)
else:
to_read = self._blob_length - self.__pos
return_size = to_read
result: bytes = create_string_buffer(return_size)
pos = 0
while to_read > 0:
to_copy = min(to_read, self.__buf_data - self.__buf_pos)
if to_copy == 0:
self.__blob_get()
to_copy = min(to_read, self.__buf_data - self.__buf_pos)
if to_copy == 0:
# BLOB EOF
break
memmove(byref(result, pos), byref(self.__buf, self.__buf_pos), to_copy)
pos += to_copy
self.__pos += to_copy
self.__buf_pos += to_copy
to_read -= to_copy
result = result.raw[:return_size]
if self.sub_type == 1:
result = result.decode(self._charset)
return result
def readline(self, size: int=-1) -> str:
"""Read and return one line from the BLOB. If size is specified, at most size bytes
will be read.
Uses `newline` as the line terminator.
Raises:
InterfaceError: For non-textual BLOBs.
"""
assert self._blob is not None
if self.sub_type != 1:
raise InterfaceError("Can't read line from binary BLOB")
line = []
to_read = self._blob_length - self.__pos
if size >= 0:
to_read = min(to_read, size)
found = False
while to_read > 0 and not found:
to_scan = min(to_read, self.__buf_data - self.__buf_pos)
if to_scan == 0:
self.__blob_get()
to_scan = min(to_read, self.__buf_data - self.__buf_pos)
if to_scan == 0:
# BLOB EOF
break
pos = 0
while pos < to_scan:
if self.__buf[self.__buf_pos+pos] == b'\n':
found = True
pos += 1
break
pos += 1
line.append(string_at(byref(self.__buf, self.__buf_pos), pos).decode(self._charset))
self.__buf_pos += pos
self.__pos += pos
to_read -= pos
result = ''.join(line)
if self.newline != '\n':
result = result.replace('\n', self.newline)
return result
def readlines(self, hint: int=-1) -> List[str]:
"""Read and return a list of lines from the stream. `hint` can be specified to
control the number of lines read: no more lines will be read if the total size
(in bytes/characters) of all lines so far exceeds hint.
Note:
It’s already possible to iterate on BLOB using `for line in blob:` ... without
calling `.readlines()`.
Raises:
InterfaceError: For non-textual BLOBs.
"""
result = []
line = self.readline()
while line:
if hint >= 0 and len(result) == hint:
break
result.append(line)
line = self.readline()
return result
def seek(self, offset: int, whence: int=os.SEEK_SET) -> None:
"""Set the file’s current position, like stdio‘s `fseek()`.
See:
:meth:`io.IOBase.seek()` for details.
Arguments:
offset: Offset from specified position.
whence: Context for offset. Accepted values: os.SEEK_SET, os.SEEK_CUR or os.SEEK_END
Warning:
If BLOB was NOT CREATED as `stream` BLOB, this method raises `DatabaseError`
exception. This constraint is set by Firebird.
"""
assert self._blob is not None
self.__pos = self._blob.seek(whence, offset)
self.__reset_buffer()
def tell(self) -> int:
"""Return current position in BLOB.
See:
:meth:`io.IOBase.tell()` for details.
"""
return self.__pos
def is_text(self) -> bool:
"""True if BLOB is a text BLOB.
"""
return self.sub_type == 1
# Properties
@property
def log_context(self) -> Any:
if self._owner is None:
return UNDEFINED
if (r := self._owner()) is not None:
return r
return 'Owner.GC'
@property
def length(self) -> int:
"""BLOB length.
"""
return self._blob_length
@property
def closed(self) -> bool:
"""True if the BLOB is closed.
"""
return self._blob is None
@property
def mode(self) -> str:
"""File mode ('r' or 'rb').
"""
return 'rb' if self.sub_type != 1 else 'r'
@property
def blob_id(self) -> a.ISC_QUAD:
"""BLOB ID.
"""
return self.__blob_id
@property
def blob_type(self) -> BlobType:
"""BLOB type.
"""
result = self._blob.get_info2(BlobInfoCode.TYPE)
return BlobType(result)
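# Comment-only sketch: reading a TEXT BLOB as a stream. Column MEMO and table NOTES
# are made-up names; `stream_blobs` is the Cursor attribute defined below.
#
#   cur = con.cursor()
#   cur.stream_blobs.append('MEMO')
#   cur.execute('SELECT MEMO FROM NOTES')
#   for (memo,) in cur:
#       with memo:                        # BlobReader is a context manager
#           for line in memo:             # iterates lines via readline()
#               print(line.rstrip())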
class Cursor(LoggingIdMixin):
"""Represents a database cursor, which is used to execute SQL statement and
manage the context of a fetch operation.
Note:
Implements context manager protocol to call `.close()` automatically.
"""
#: This read/write attribute specifies the number of rows to fetch at a time with
#: .fetchmany(). It defaults to 1 meaning to fetch a single row at a time.
#:
#: Required by Python DB API 2.0
arraysize: int = 1
def __init__(self, connection: Connection, transaction: TransactionManager):
self._connection: Connection = connection
self._dialect: int = connection.sql_dialect
self._transaction: TransactionManager = transaction
self._stmt: Statement = None
self._encoding: str = connection._encoding
self._result: iResultSet = None
self._last_fetch_status: StateResult = None
self._name: str = None
self._executed: bool = False
self._cursor_flags: CursorFlag = CursorFlag.NONE
self.__output_cache: Tuple = None
self.__internal: bool = False
self.__blob_readers: Set = weakref.WeakSet()
#: Names of columns that should be returned as `BlobReader`.
self.stream_blobs: List[str] = []
#: BLOBs larger than this threshold are returned as `BlobReader` instead of in materialized form.
self.stream_blob_threshold = driver_config.stream_blob_threshold.value
def __enter__(self) -> Cursor:
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def __del__(self):
if self._result is not None or self._stmt is not None or self.__blob_readers:
warn(f"Cursor '{self.logging_id}' disposed without prior close()", ResourceWarning)
self.close()
def __next__(self):
if (row := self.fetchone()) is not None:
return row
else:
raise StopIteration
def __iter__(self):
return self
def _dead_con(self, obj) -> None:
self._connection = None
def _extract_db_array_to_list(self, esize: int, dtype: int, subtype: int,
scale: int, dim: int, dimensions: List[int],
buf: Any, bufpos: int) -> Tuple[Any, int]:
value = []
if dim == len(dimensions)-1:
for _ in range(dimensions[dim]):
if dtype in (a.blr_text, a.blr_text2):
val = string_at(buf[bufpos:bufpos+esize], esize)
### Todo: verify handling of P version differences
if subtype != 1: # non OCTETS
val = val.decode(self._encoding)
# CHAR with multibyte encoding requires special handling
if subtype in (4, 69): # UTF8 and GB18030
reallength = esize // 4
elif subtype == 3: # UNICODE_FSS
reallength = esize // 3
else:
reallength = esize
val = val[:reallength]
elif dtype in (a.blr_varying, a.blr_varying2):
val = string_at(buf[bufpos:bufpos+esize])
if subtype != a.OCTETS:
val = val.decode(self._encoding)
elif dtype in (a.blr_short, a.blr_long, a.blr_int64):
val = (0).from_bytes(buf[bufpos:bufpos + esize], 'little', signed=True)
if subtype or scale:
val = decimal.Decimal(val) / _tenTo[abs(256-scale)]
elif dtype == a.blr_bool:
val = (0).from_bytes(buf[bufpos:bufpos + esize], 'little') == 1
elif dtype == a.blr_float:
val = struct.unpack('f', buf[bufpos:bufpos+esize])[0]
elif dtype in (a.blr_d_float, a.blr_double):
val = struct.unpack('d', buf[bufpos:bufpos+esize])[0]
elif dtype == a.blr_timestamp:
val = datetime.datetime.combine(_util.decode_date(buf[bufpos:bufpos+4]),
_util.decode_time(buf[bufpos+4:bufpos+esize]))
elif dtype == a.blr_sql_date:
val = _util.decode_date(buf[bufpos:bufpos+esize])
elif dtype == a.blr_sql_time:
val = _util.decode_time(buf[bufpos:bufpos+esize])
elif dtype == a.blr_sql_time_tz:
val = _util.decode_time_tz(buf[bufpos:bufpos+esize])
elif dtype == a.blr_timestamp_tz:
val = _util.decode_timestamp_tz(buf[bufpos:bufpos+esize])
elif dtype == a.blr_int128:
val = decimal.Decimal(_util.get_int128().to_str(a.FB_I128.from_buffer_copy(buf[bufpos:bufpos+esize]), scale))
elif dtype == a.blr_dec64:
val = decimal.Decimal(_util.get_decfloat16().to_str(a.FB_DEC16.from_buffer_copy(buf[bufpos:bufpos+esize])))
elif dtype == a.blr_dec128:
val = decimal.Decimal(_util.get_decfloat34().to_str(a.FB_DEC34.from_buffer_copy(buf[bufpos:bufpos+esize])))
else: # pragma: no cover
raise InterfaceError(f"Unsupported Firebird ARRAY subtype: {dtype}")
value.append(val)
bufpos += esize
else:
for _ in range(dimensions[dim]):
(val, bufpos) = self._extract_db_array_to_list(esize, dtype, subtype,
scale, dim + 1,
dimensions,
buf, bufpos)
value.append(val)
return (value, bufpos)
def _copy_list_to_db_array(self, esize: int, dtype: int, subtype: int,
scale: int, dim: int, dimensions: List[int],
value: Any, buf: Any, bufpos: int) -> None:
valuebuf = None
if dtype in (a.blr_text, a.blr_text2):
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype in (a.blr_varying, a.blr_varying2):
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype in (a.blr_short, a.blr_long, a.blr_int64):
if esize == 2:
valuebuf = a.ISC_SHORT(0)
elif esize == 4:
valuebuf = a.ISC_LONG(0)
elif esize == 8:
valuebuf = a.ISC_INT64(0)
else: # pragma: no cover
raise InterfaceError("Unsupported number type")
elif dtype == a.blr_float:
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype in (a.blr_d_float, a.blr_double):
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype == a.blr_timestamp:
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype == a.blr_sql_date:
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype == a.blr_sql_time:
valuebuf = create_string_buffer(bytes([0]), esize)
elif dtype == a.blr_bool:
valuebuf = create_string_buffer(bytes([0]), esize)
else: # pragma: no cover
raise InterfaceError(f"Unsupported Firebird ARRAY subtype: {dtype}")
self._fill_db_array_buffer(esize, dtype,
subtype, scale,
dim, dimensions,
value, valuebuf,
buf, bufpos)
def _fill_db_array_buffer(self, esize: int, dtype: int, subtype: int,
scale: int, dim: int, dimensions: List[int],
value: Any, valuebuf: Any, buf: Any, bufpos: int) -> int:
if dim == len(dimensions)-1:
for i in range(dimensions[dim]):
if dtype in (a.blr_text, a.blr_text2,
a.blr_varying, a.blr_varying2):
val = value[i]
if isinstance(val, str):
val = val.encode(self._encoding)
if len(val) > esize:
raise ValueError(f"ARRAY value of parameter is too long,"
f" expected {esize}, found {len(val)}")
valuebuf.value = val
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype in (a.blr_short, a.blr_long, a.blr_int64):
if subtype or scale:
val = value[i]
if isinstance(val, decimal.Decimal):
val = int((val * _tenTo[256-abs(scale)]).to_integral())
elif isinstance(val, (int, float)):
val = int(val * _tenTo[256-abs(scale)])
else:
raise TypeError(f'Objects of type {type(val)} are not'
f' acceptable input for'
f' a fixed-point column.')
valuebuf.value = val
else:
if esize == 2:
valuebuf.value = value[i]
elif esize == 4:
valuebuf.value = value[i]
elif esize == 8:
valuebuf.value = value[i]
else: # pragma: no cover
raise InterfaceError("Unsupported type")
memmove(byref(buf, bufpos),
byref(valuebuf),
esize)
elif dtype == a.blr_bool:
valuebuf.value = (1 if value[i] else 0).to_bytes(1, 'little')
memmove(byref(buf, bufpos),
byref(valuebuf),
esize)
elif dtype == a.blr_float:
valuebuf.value = struct.pack('f', value[i])
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype in (a.blr_d_float, a.blr_double):
valuebuf.value = struct.pack('d', value[i])
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_timestamp:
valuebuf.value = _encode_timestamp(value[i])
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_sql_date:
valuebuf.value = _util.encode_date(value[i]).to_bytes(4, 'little')
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_sql_time:
valuebuf.value = _util.encode_time(value[i]).to_bytes(4, 'little')
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_sql_time_tz:
valuebuf.value = _util.encode_time_tz(value[i]).to_bytes(esize, 'little')
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_timestamp_tz:
valuebuf.value = _util.encode_timestamp_tz(value[i])
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_dec64:
valuebuf.value = _util.get_decfloat16().from_str(str(value[i]))
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_dec128:
valuebuf.value = _util.get_decfloat34().from_str(str(value[i]))
memmove(byref(buf, bufpos), valuebuf, esize)
elif dtype == a.blr_int128:
valuebuf.value = _util.get_int128().from_str(str(value[i]), scale)
memmove(byref(buf, bufpos), valuebuf, esize)
else: # pragma: no cover
raise InterfaceError(f"Unsupported Firebird ARRAY subtype: {dtype}")
bufpos += esize
else:
for i in range(dimensions[dim]):
bufpos = self._fill_db_array_buffer(esize, dtype, subtype,
scale, dim+1,
dimensions, value[i],
valuebuf, buf, bufpos)
return bufpos
def _validate_array_value(self, dim: int, dimensions: List[int],
value_type: int, sqlsubtype: int,
value_scale: int, value: Any) -> bool:
ok = isinstance(value, (list, tuple))
ok = ok and (len(value) == dimensions[dim])
if not ok:
return False
for i in range(dimensions[dim]):
if dim == len(dimensions) - 1:
# leaf: check value type
if value_type in (a.blr_text, a.blr_text2, a.blr_varying, a.blr_varying2):
ok = isinstance(value[i], str)
elif value_type in (a.blr_short, a.blr_long, a.blr_int64, a.blr_int128):
if sqlsubtype or value_scale:
ok = isinstance(value[i], decimal.Decimal)
else:
ok = isinstance(value[i], int)
elif value_type in (a.blr_dec64, a.blr_dec128):
ok = isinstance(value[i], decimal.Decimal)
elif value_type == a.blr_float:
ok = isinstance(value[i], float)
elif value_type in (a.blr_d_float, a.blr_double):
ok = isinstance(value[i], float)
elif value_type in (a.blr_timestamp, a.blr_timestamp_tz):
ok = isinstance(value[i], datetime.datetime)
elif value_type == a.blr_sql_date:
ok = isinstance(value[i], datetime.date)
elif value_type in (a.blr_sql_time, a.blr_sql_time_tz):
ok = isinstance(value[i], datetime.time)
elif value_type == a.blr_bool:
ok = isinstance(value[i], bool)
else:
ok = False
else:
# non-leaf: recurse down
ok = ok and self._validate_array_value(dim + 1, dimensions,
value_type, sqlsubtype,
value_scale, value[i])
if not ok: # Fail early
return False
return ok
def _pack_input(self, meta: iMessageMetadata, buffer: bytes,
parameters: Sequence) -> Tuple[iMessageMetadata, bytes]:
in_cnt = meta.get_count()
if len(parameters) != in_cnt:
raise InterfaceError(f"Statement parameter sequence contains"
f" {len(parameters)} items,"
f" but exactly {in_cnt} are required")
#
buf_size = len(buffer)
memset(buffer, 0, buf_size)
# Adjust metadata where needed
with meta.get_builder() as builder:
for i in range(in_cnt):
value = parameters[i]
if _is_str_param(value, meta.get_type(i)):
builder.set_type(i, SQLDataType.TEXT)
if not isinstance(value, (str, bytes, bytearray)):
value = str(value)
builder.set_length(i, len(value.encode(self._encoding)) if isinstance(value, str) else len(value))
in_meta = builder.get_metadata()
new_size = in_meta.get_message_length()
in_buffer = create_string_buffer(new_size) if buf_size < new_size else buffer
buf_addr = addressof(in_buffer)
with in_meta:
for i in range(in_cnt):
value = parameters[i]
datatype = in_meta.get_type(i)
length = in_meta.get_length(i)
offset = in_meta.get_offset(i)
# handle NULL value
in_buffer[in_meta.get_null_offset(i)] = 1 if value is None else 0
if value is None:
continue
# store parameter value
if _is_str_param(value, datatype):
# Implicit conversion to string
if not isinstance(value, (str, bytes, bytearray)):
value = str(value)
if isinstance(value, str) and self._encoding:
value = value.encode(self._encoding)
if (datatype in [SQLDataType.TEXT, SQLDataType.VARYING]
and len(value) > length):
raise ValueError(f"Value of parameter ({i}) is too long,"
f" expected {length}, found {len(value)}")
memmove(buf_addr + offset, value, len(value))
elif datatype in [SQLDataType.SHORT, SQLDataType.LONG, SQLDataType.INT64]:
# Is it a scaled integer?
scale = in_meta.get_scale(i)
if in_meta.get_subtype(i) or scale:
if isinstance(value, decimal.Decimal):
value = int((value * _tenTo[abs(scale)]).to_integral())
elif isinstance(value, (int, float)):
value = int(value * _tenTo[abs(scale)])
else:
raise TypeError(f'Objects of type {type(value)} are not'
f' acceptable input for'
f' a fixed-point column.')
_check_integer_range(value, self._dialect, datatype,
in_meta.get_subtype(i), scale)
memmove(buf_addr + offset, value.to_bytes(length, 'little', signed=True), length)
elif datatype == SQLDataType.DATE:
memmove(buf_addr + offset, _util.encode_date(value).to_bytes(length, 'little'), length)
elif datatype == SQLDataType.TIME:
memmove(buf_addr + offset, _util.encode_time(value).to_bytes(length, 'little'), length)
elif datatype == SQLDataType.TIME_TZ:
memmove(buf_addr + offset, _util.encode_time_tz(value), length)
elif datatype == SQLDataType.TIMESTAMP:
memmove(buf_addr + offset, _encode_timestamp(value), length)
elif datatype == SQLDataType.TIMESTAMP_TZ:
memmove(buf_addr + offset, _util.encode_timestamp_tz(value), length)
elif datatype == SQLDataType.DEC16:
memmove(buf_addr + offset, byref(_util.get_decfloat16().from_str(str(value))), length)
elif datatype == SQLDataType.DEC34:
memmove(buf_addr + offset, _util.get_decfloat34().from_str(str(value)), length)
elif datatype == SQLDataType.INT128:
memmove(buf_addr + offset, _util.get_int128().from_str(str(value), in_meta.get_scale(i)), length)
elif datatype == SQLDataType.FLOAT:
memmove(buf_addr + offset, struct.pack('f', value), length)
elif datatype == SQLDataType.DOUBLE:
memmove(buf_addr + offset, struct.pack('d', value), length)
elif datatype == SQLDataType.BOOLEAN:
memmove(buf_addr + offset, (1 if value else 0).to_bytes(length, 'little'), length)
elif datatype == SQLDataType.BLOB:
blobid = a.ISC_QUAD(0, 0)
if hasattr(value, 'read'):
# It seems we've got file-like object, use stream BLOB
blob_buf = _create_blob_buffer()
blob: iBlob = self._connection._att.create_blob(self._transaction._tra,
blobid, _bpb_stream)
try:
memmove(buf_addr + offset, addressof(blobid), length)
while value_chunk := value.read(MAX_BLOB_SEGMENT_SIZE):
blob_buf.raw = value_chunk.encode(self._encoding) if isinstance(value_chunk, str) else value_chunk
blob.put_segment(len(value_chunk), blob_buf)
memset(blob_buf, 0, MAX_BLOB_SEGMENT_SIZE)
finally:
blob.close()
del blob_buf
else:
# Non-stream BLOB
if isinstance(value, str):
if in_meta.get_subtype(i) == 1:
value = value.encode(self._encoding)
else:
raise TypeError('String value is not'
' acceptable type for'
' a non-textual BLOB column.')
blob_buf = create_string_buffer(value)
blob: iBlob = self._connection._att.create_blob(self._transaction._tra,
blobid)
try:
memmove(buf_addr + offset, addressof(blobid), length)
total_size = len(value)
bytes_written_so_far = 0
bytes_to_write_this_time = MAX_BLOB_SEGMENT_SIZE
while bytes_written_so_far < total_size:
if (total_size - bytes_written_so_far) < MAX_BLOB_SEGMENT_SIZE:
bytes_to_write_this_time = (total_size - bytes_written_so_far)
blob.put_segment(bytes_to_write_this_time,
addressof(blob_buf) + bytes_written_so_far)
bytes_written_so_far += bytes_to_write_this_time
finally:
blob.close()
del blob_buf
elif datatype == SQLDataType.ARRAY:
arrayid = a.ISC_QUAD(0, 0)
arrayid_ptr = pointer(arrayid)
arraydesc = a.ISC_ARRAY_DESC(0)
isc_status = a.ISC_STATUS_ARRAY()
db_handle = a.FB_API_HANDLE(0)
tr_handle = a.FB_API_HANDLE(0)
relname = in_meta.get_relation(i).encode(self._encoding)
sqlname = in_meta.get_field(i).encode(self._encoding)
api = a.get_api()
api.fb_get_database_handle(isc_status, db_handle, self._connection._att)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._pack_input:fb_get_database_handle()")
api.fb_get_transaction_handle(isc_status, tr_handle, self._transaction._tra)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._pack_input:fb_get_transaction_handle()")
sqlsubtype = self._connection._get_array_sqlsubtype(relname, sqlname)
api.isc_array_lookup_bounds(isc_status, db_handle, tr_handle,
relname, sqlname, arraydesc)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._pack_input:isc_array_lookup_bounds()")
value_type = arraydesc.array_desc_dtype
value_scale = arraydesc.array_desc_scale
value_size = arraydesc.array_desc_length
if value_type in (a.blr_varying, a.blr_varying2):
value_size += 2
dimensions = []
total_num_elements = 1
for dimension in range(arraydesc.array_desc_dimensions):
bounds = arraydesc.array_desc_bounds[dimension]
dimensions.append((bounds.array_bound_upper + 1) - bounds.array_bound_lower)
total_num_elements *= dimensions[dimension]
total_size = total_num_elements * value_size
# Validate value to make sure it matches the array structure
if not self._validate_array_value(0, dimensions, value_type,
sqlsubtype, value_scale, value):
raise ValueError("Incorrect ARRAY field value.")
value_buffer = create_string_buffer(total_size)
tsize = a.ISC_LONG(total_size)
self._copy_list_to_db_array(value_size, value_type,
sqlsubtype, value_scale,
0, dimensions,
value, value_buffer, 0)
api.isc_array_put_slice(isc_status, db_handle, tr_handle,
arrayid_ptr, arraydesc,
value_buffer, tsize)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._pack_input:/isc_array_put_slice()")
memmove(buf_addr + offset, addressof(arrayid), length)
#
in_meta.add_ref() # Everything went just fine, so we keep the metadata past 'with'
return (in_meta, in_buffer)
def _unpack_output(self) -> Tuple:
values = []
buffer = self._stmt._out_buffer
buf_addr = addressof(buffer)
for desc in self._stmt._out_desc:
value: Any = '<NOT_IMPLEMENTED>'
if ord(buffer[desc.null_offset]) != 0:
value = None
else:
datatype = desc.datatype
offset = desc.offset
length = desc.length
if datatype == SQLDataType.TEXT:
value = string_at(buf_addr + offset, length)
if desc.charset != a.OCTETS:
value = value.decode(self._encoding)
# CHAR with multibyte encoding requires special handling
if desc.charset in (4, 69): # UTF8 and GB18030
reallength = length // 4
elif desc.charset == 3: # UNICODE_FSS
reallength = length // 3
else:
reallength = length
value = value[:reallength]
elif datatype == SQLDataType.VARYING:
size = (0).from_bytes(string_at(buf_addr + offset, 2), 'little')
value = string_at(buf_addr + offset + 2, size)
if desc.charset != 1:
value = value.decode(self._encoding)
elif datatype == SQLDataType.BOOLEAN:
value = bool((0).from_bytes(buffer[offset], 'little'))
elif datatype in [SQLDataType.SHORT, SQLDataType.LONG, SQLDataType.INT64]:
value = (0).from_bytes(buffer[offset:offset + length], 'little', signed=True)
# Is it a scaled integer?
if desc.subtype or desc.scale:
value = decimal.Decimal(value) / _tenTo[abs(desc.scale)]
elif datatype == SQLDataType.DATE:
value = _util.decode_date(buffer[offset:offset+length])
elif datatype == SQLDataType.TIME:
value = _util.decode_time(buffer[offset:offset+length])
elif datatype == SQLDataType.TIME_TZ:
value = _util.decode_time_tz(buffer[offset:offset+length])
elif datatype == SQLDataType.TIMESTAMP:
value = datetime.datetime.combine(_util.decode_date(buffer[offset:offset+4]),
_util.decode_time(buffer[offset+4:offset+length]))
elif datatype == SQLDataType.TIMESTAMP_TZ:
value = _util.decode_timestamp_tz(buffer[offset:offset+length])
elif datatype == SQLDataType.INT128:
value = decimal.Decimal(_util.get_int128().to_str(a.FB_I128.from_buffer_copy(buffer[offset:offset+length]), desc.scale))
elif datatype == SQLDataType.DEC16:
value = decimal.Decimal(_util.get_decfloat16().to_str(a.FB_DEC16.from_buffer_copy(buffer[offset:offset+length])))
elif datatype == SQLDataType.DEC34:
value = decimal.Decimal(_util.get_decfloat34().to_str(a.FB_DEC34.from_buffer_copy(buffer[offset:offset+length])))
elif datatype == SQLDataType.FLOAT:
value = struct.unpack('f', buffer[offset:offset+length])[0]
elif datatype == SQLDataType.DOUBLE:
value = struct.unpack('d', buffer[offset:offset+length])[0]
elif datatype == SQLDataType.BLOB:
val = buffer[offset:offset+length]
blobid = a.ISC_QUAD((0).from_bytes(val[:4], 'little'),
(0).from_bytes(val[4:], 'little'))
blob = self._connection._att.open_blob(self._transaction._tra, blobid, _bpb_stream)
# Get BLOB total length and max. size of segment
blob_length = blob.get_info2(BlobInfoCode.TOTAL_LENGTH)
segment_size = blob.get_info2(BlobInfoCode.MAX_SEGMENT)
# Check if stream BLOB is requested instead of a materialized one
if ((self.stream_blobs and (desc.alias if desc.alias != desc.field else desc.field) in self.stream_blobs)
or (blob_length > self.stream_blob_threshold)):
# Stream BLOB
value = BlobReader(blob, blobid, desc.subtype, blob_length,
segment_size, self._encoding, self)
self.__blob_readers.add(value)
else:
# Materialized BLOB
try:
# Load BLOB
blob_value = create_string_buffer(blob_length)
bytes_read = 0
bytes_actually_read = a.Cardinal(0)
while bytes_read < blob_length:
blob.get_segment(min(segment_size, blob_length - bytes_read),
byref(blob_value, bytes_read),
bytes_actually_read)
bytes_read += bytes_actually_read.value
# Finalize value
value = blob_value.raw
if desc.subtype == 1:
value = value.decode(self._encoding)
finally:
blob.close()
del blob_value
elif datatype == SQLDataType.ARRAY:
value = []
val = buffer[offset:offset+length]
arrayid = a.ISC_QUAD((0).from_bytes(val[:4], 'little'),
(0).from_bytes(val[4:], 'little'))
arraydesc = a.ISC_ARRAY_DESC(0)
isc_status = a.ISC_STATUS_ARRAY()
db_handle = a.FB_API_HANDLE(0)
tr_handle = a.FB_API_HANDLE(0)
relname = desc.relation.encode(self._encoding)
sqlname = desc.field.encode(self._encoding)
api = a.get_api()
api.fb_get_database_handle(isc_status, db_handle, self._connection._att)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._unpack_output:fb_get_database_handle()")
api.fb_get_transaction_handle(isc_status, tr_handle, self._transaction._tra)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._unpack_output:fb_get_transaction_handle()")
sqlsubtype = self._connection._get_array_sqlsubtype(relname, sqlname)
api.isc_array_lookup_bounds(isc_status, db_handle, tr_handle,
relname, sqlname, arraydesc)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._unpack_output:isc_array_lookup_bounds()")
value_type = arraydesc.array_desc_dtype
value_scale = arraydesc.array_desc_scale
value_size = arraydesc.array_desc_length
if value_type in (a.blr_varying, a.blr_varying2):
value_size += 2
dimensions = []
total_num_elements = 1
for dimension in range(arraydesc.array_desc_dimensions):
bounds = arraydesc.array_desc_bounds[dimension]
dimensions.append((bounds.array_bound_upper + 1) - bounds.array_bound_lower)
total_num_elements *= dimensions[dimension]
total_size = total_num_elements * value_size
value_buffer = create_string_buffer(total_size)
tsize = a.ISC_LONG(total_size)
api.isc_array_get_slice(isc_status, db_handle, tr_handle,
arrayid, arraydesc,
value_buffer, tsize)
if a.db_api_error(isc_status): # pragma: no cover
raise a.exception_from_status(DatabaseError,
isc_status,
"Error in Cursor._unpack_output:isc_array_get_slice()")
(value, bufpos) = self._extract_db_array_to_list(value_size,
value_type,
sqlsubtype,
value_scale,
0, dimensions,
value_buffer, 0)
values.append(value)
return tuple(values)
def _fetchone(self) -> Optional[Tuple]:
if self._executed:
if self._stmt._out_cnt == 0:
return None
if self._last_fetch_status == StateResult.NO_DATA:
return None
if self.__output_cache is not None:
result = self.__output_cache
self._last_fetch_status = StateResult.NO_DATA
self.__output_cache = None
return result
else:
self._last_fetch_status = self._result.fetch_next(self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
raise InterfaceError("Cannot fetch from cursor that did not executed a statement.")
def _execute(self, operation: Union[str, Statement],
parameters: Sequence=None, flags: CursorFlag=CursorFlag.NONE) -> None:
if not self._transaction.is_active():
self._transaction.begin()
if isinstance(operation, Statement):
if operation._connection() is not self._connection:
raise InterfaceError('Cannot execute Statement that was created by different Connection.')
self.close()
self._stmt = operation
self.__internal = False
elif self._stmt is not None and self._stmt.sql == operation:
# We should execute the same SQL string again
self._clear()
else:
self.close()
self._stmt = self._connection._prepare(operation, self._transaction)
self.__internal = True
self._cursor_flags = flags
in_meta = None
# Execute the statement
try:
if self._stmt._in_cnt > 0:
in_meta, self._stmt._in_buffer = self._pack_input(self._stmt._in_meta,
self._stmt._in_buffer,
parameters)
if self._stmt.has_cursor():
# Statement returns multiple rows
self._result = self._stmt._istmt.open_cursor(self._transaction._tra,
in_meta, self._stmt._in_buffer,
self._stmt._out_meta,
flags)
else:
# Statement may return single row
self._stmt._istmt.execute(self._transaction._tra, in_meta,
self._stmt._in_buffer,
self._stmt._out_meta, self._stmt._out_buffer)
if self._stmt._out_buffer is not None:
self.__output_cache = self._unpack_output()
self._executed = True
self._last_fetch_status = None
finally:
if in_meta is not None:
in_meta.release()
def _clear(self) -> None:
if self._result is not None:
self._result.close()
self._result = None
self._name = None
self._last_fetch_status = None
self._executed = False
self.__output_cache = None
while self.__blob_readers:
self.__blob_readers.pop().close()
def callproc(self, proc_name: str, parameters: Sequence=None) -> None:
"""Executes a stored procedure with the given name.
Arguments:
proc_name: Stored procedure name.
parameters: Sequence of parameters. Must contain one entry for each argument
that the procedure expects.
.. note::
If the stored procedure has output parameters, you must retrieve their values
separately with a `.Cursor.fetchone()` call. This method is not very convenient,
but conforms to Python DB API 2.0. If you don't require conformance to the Python
DB API, it's recommended to use the more convenient method `.Cursor.call_procedure()`
instead.
"""
params = [] if parameters is None else parameters
sql = ('EXECUTE PROCEDURE ' + proc_name + ' '
+ ','.join('?' * len(params)))
self.execute(sql, params)
def call_procedure(self, proc_name: str, parameters: Sequence=None) -> Optional[Tuple]:
"""Executes a stored procedure with the given name.
Arguments:
proc_name: Stored procedure name.
parameters: Sequence of parameters. Must contain one entry for each argument
that the procedure expects.
Returns:
None or tuple with values returned by stored procedure.
"""
self.callproc(proc_name, parameters)
return self.fetchone() if self._stmt._out_cnt > 0 else None
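# Illustrative sketch (comment only): calling a stored procedure. The procedure name
# GET_EMP_PROJ, the parameter value, and the cursor `cur` are assumptions.
#
#     # DB API 2.0 style: execute, then fetch output values explicitly
#     cur.callproc('GET_EMP_PROJ', [44])
#     outputs = cur.fetchone()
#
#     # driver-specific convenience: execute and fetch in one call
#     outputs = cur.call_procedure('GET_EMP_PROJ', [44])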
def set_cursor_name(self, name: str) -> None:
"""Sets name for the SQL cursor.
Arguments:
name: Cursor name.
"""
if not self._executed:
raise InterfaceError("Cannot set name for cursor has not yet "
"executed a statement")
if self._name:
raise InterfaceError("Cursor's name has already been declared in"
" context of currently executed statement")
self._stmt._istmt.set_cursor_name(name)
self._name = name
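# Illustrative sketch (comment only): positioned update through a named cursor.
# Table/column names are assumptions; `con` is the Connection that owns `cur`.
#
#     cur.execute('select ID, STATE from TASKS for update')
#     cur.set_cursor_name('TASK_CUR')
#     upd = con.cursor()
#     for (task_id, state) in cur:
#         if state == 'NEW':
#             upd.execute('update TASKS set STATE = ? where current of TASK_CUR', ['DONE'])
#     con.commit()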
def prepare(self, operation: str) -> Statement:
"""Creates prepared statement for repeated execution.
Arguments:
operation: SQL command.
"""
return self._connection._prepare(operation, self._transaction)
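# Illustrative sketch (comment only): preparing a statement once and executing it
# repeatedly. The table name and values are assumptions.
#
#     stmt = cur.prepare('insert into LOG(MSG) values (?)')
#     for msg in ('start', 'work', 'stop'):
#         cur.execute(stmt, [msg])
#     stmt.free()   # release the prepared statement when no longer needed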
def open(self, operation: Union[str, Statement], parameters: Sequence[Any]=None) -> Cursor:
"""Executes SQL command or prepared `Statement` as scrollable.
Starts new transaction if transaction manager associated with cursor is not active.
Arguments:
operation: SQL command or prepared `Statement`.
parameters: Sequence of parameters. Must contain one entry for each argument
that the operation expects.
Note:
If `operation` is a string with SQL command that is exactly the same as the
last executed command, the internally prepared `Statement` from last execution
is reused.
If cursor is open, it's closed before new statement is executed.
"""
self._execute(operation, parameters, CursorFlag.SCROLLABLE)
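# Illustrative sketch (comment only): scrollable cursor navigation after `open()`.
# The query is an assumption; the fetch_* methods used are defined further below.
#
#     cur.open('select ID from ITEMS order by ID')
#     first = cur.fetch_first()
#     last = cur.fetch_last()
#     tenth = cur.fetch_absolute(10)
#     prev = cur.fetch_relative(-1)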
def execute(self, operation: Union[str, Statement], parameters: Sequence[Any]=None) -> Cursor:
"""Executes SQL command or prepared `Statement`.
Starts new transaction if transaction manager associated with cursor is not active.
Arguments:
operation: SQL command or prepared `Statement`.
parameters: Sequence of parameters. Must contain one entry for each argument
that the operation expects.
Returns:
`self` so call to execute could be used as iterator over returned rows.
Note:
If `operation` is a string with SQL command that is exactly the same as the
last executed command, the internally prepared `Statement` from last execution
is reused.
If cursor is open, it's closed before new statement is executed.
"""
self._execute(operation, parameters)
return self
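# Illustrative sketch (comment only): basic parametrized execution. Because `execute`
# returns the cursor itself, the call can be iterated directly. Table/column names
# are assumptions.
#
#     for (name, salary) in cur.execute('select NAME, SALARY from EMP where DEPT = ?', [10]):
#         print(name, salary)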
def executemany(self, operation: Union[str, Statement],
seq_of_parameters: Sequence[Sequence[Any]]) -> None:
"""Executes SQL command or prepared statement against all parameter
sequences found in the sequence `seq_of_parameters`.
Starts new transaction if transaction manager associated with cursor is not active.
Arguments:
operation: SQL command or prepared `Statement`.
seq_of_parameters: Sequence of sequences of parameters. Must contain
one sequence of parameters for each execution
that has one entry for each argument that the
operation expects.
Note:
This function simply calls `.execute` in a loop, feeding it with
parameters from `seq_of_parameters`. Because `.execute` reuses the statement,
calling `executemany` is equally effective as direct use of a prepared `Statement`
and calling `execute` in a loop directly in the application.
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
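# Illustrative sketch (comment only): bulk insert with executemany. The SQL string is
# prepared once (statement reuse) and executed for every parameter sequence.
# Table/column names are assumptions.
#
#     rows = [(1, 'one'), (2, 'two'), (3, 'three')]
#     cur.executemany('insert into NUMBERS(N, NAME) values (?, ?)', rows)
#     con.commit()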
def close(self) -> None:
"""Close the cursor and release all associated resources.
The result set (if any) from the last executed statement is released, and if the
executed `Statement` was not supplied externally, it is released as well.
Note:
A closed cursor can still be used to execute further SQL commands.
"""
self._clear()
if self._stmt is not None:
if self.__internal:
self._stmt.free()
self._stmt = None
def fetchone(self) -> Tuple:
"""Fetch the next row of a query result set.
"""
if self._stmt:
return self._fetchone()
else:
raise InterfaceError("Cannot fetch from cursor that did not executed a statement.")
def fetchmany(self, size: int=None) -> List[Tuple]:
"""Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples).
An empty sequence is returned when no more rows are available. The number of rows
to fetch per call is specified by the parameter. If it is not given, the cursor’s
`.arraysize` determines the number of rows to be fetched. The method does try to
fetch as many rows as indicated by the size parameter. If this is not possible due
to the specified number of rows not being available, fewer rows may be returned.
Arguments:
size: The number of rows to fetch.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
if (row := self.fetchone()) is not None:
result.append(row)
else:
break
return result
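# Illustrative sketch (comment only): processing a large result set in batches of
# `arraysize` rows instead of loading everything with fetchall(). `process` is a
# placeholder for application code.
#
#     cur.arraysize = 500
#     cur.execute('select * from BIG_TABLE')
#     while batch := cur.fetchmany():
#         process(batch)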
def fetchall(self) -> List[Tuple]:
"""Fetch all remaining rows of a query result set.
"""
return [row for row in self]
def fetch_next(self) -> Optional[Tuple]:
"""Fetch the next row of a scrollable query result set.
Returns None if there is no row to be fetched.
"""
assert self._result is not None
self._last_fetch_status = self._result.fetch_next(self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
def fetch_prior(self) -> Optional[Tuple]:
"""Fetch the previous row of a scrollable query result set.
Returns None if there is no row to be fetched.
"""
assert self._result is not None
self._last_fetch_status = self._result.fetch_prior(self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
def fetch_first(self) -> Optional[Tuple]:
"""Fetch the first row of a scrollable query result set.
Returns None if there is no row to be fetched.
"""
assert self._result is not None
self._last_fetch_status = self._result.fetch_first(self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
def fetch_last(self) -> Optional[Tuple]:
"""Fetch the last row of a scrollable query result set.
Returns None if there is no row to be fetched.
"""
assert self._result is not None
self._last_fetch_status = self._result.fetch_last(self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
def fetch_absolute(self, position: int) -> Optional[Tuple]:
"""Fetch the row of a scrollable query result set specified by absolute position.
Returns None if there is no row to be fetched.
Arguments:
position: Absolute position number of row in result set.
"""
assert self._result is not None
self._last_fetch_status = self._result.fetch_absolute(position, self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
def fetch_relative(self, offset: int) -> Optional[Tuple]:
"""Fetch the row of a scrollable query result set specified by relative position.
Returns None if there is no row to be fetched.
Arguments:
offset: Relative position number of row in result set. Negative value refers
to previous row, positive to next row.
"""
assert self._result is not None
self._last_fetch_status = self._result.fetch_relative(offset, self._stmt._out_buffer)
if self._last_fetch_status == StateResult.OK:
return self._unpack_output()
else:
return None
def setinputsizes(self, sizes: Sequence[Type]) -> None:
"""Required by Python DB API 2.0, but pointless for Firebird, so it does nothing.
"""
pass
def setoutputsize(self, size: int, column: int=None) -> None:
"""Required by Python DB API 2.0, but pointless for Firebird, so it does nothing.
"""
pass
def is_closed(self) -> bool:
"""Returns True if cursor is closed.
"""
return self._stmt is None
def is_eof(self) -> bool:
"""Returns True is scrollable cursor is positioned at the end.
"""
assert self._result is not None
return self._result.is_eof()
def is_bof(self) -> bool:
"""Returns True is scrollable cursor is positioned at the beginning.
"""
assert self._result is not None
return self._result.is_bof()
# Properties
@property
def connection(self) -> Connection:
"""Connection associated with cursor.
"""
return self._connection
@property
def log_context(self) -> Connection:
return self._connection
@property
def statement(self) -> Statement:
"""Executed `Statement` or None if cursor does not executed a statement yet.
"""
return self._stmt
@property
def description(self) -> Tuple[DESCRIPTION]:
"""Tuple of DESCRIPTION tuples (with 7-items).
Each of these tuples contains information describing one result column:
(name, type_code, display_size, internal_size, precision, scale, null_ok)
"""
if self._stmt is None:
return []
if self._stmt._desc is None:
desc = []
for meta in self._stmt._out_desc:
scale = meta.scale
precision = 0
if meta.datatype in [SQLDataType.TEXT, SQLDataType.VARYING]:
vtype = str
if meta.subtype in (4, 69): # UTF8 and GB18030
dispsize = meta.length // 4
elif meta.subtype == 3: # UNICODE_FSS
dispsize = meta.length // 3
else:
dispsize = meta.length
elif (meta.datatype in [SQLDataType.SHORT, SQLDataType.LONG, SQLDataType.INT64]
and (meta.subtype or meta.scale)):
vtype = decimal.Decimal
precision = self._connection._determine_field_precision(meta)
dispsize = 20
elif meta.datatype == SQLDataType.SHORT:
vtype = int
dispsize = 6
elif meta.datatype == SQLDataType.LONG:
vtype = int
dispsize = 11
elif meta.datatype == SQLDataType.INT64:
vtype = int
dispsize = 20
elif meta.datatype in [SQLDataType.FLOAT, SQLDataType.D_FLOAT, SQLDataType.DOUBLE]:
# Special case, dialect 1 DOUBLE/FLOAT
# could be Fixed point
if (self._stmt._dialect < 3) and meta.scale:
vtype = decimal.Decimal
precision = self._connection._determine_field_precision(meta)
else:
vtype = float
dispsize = 17
elif meta.datatype == SQLDataType.BLOB:
vtype = str if meta.subtype == 1 else bytes
scale = meta.subtype
dispsize = 0
elif meta.datatype == SQLDataType.TIMESTAMP:
vtype = datetime.datetime
dispsize = 22
elif meta.datatype == SQLDataType.DATE:
vtype = datetime.date
dispsize = 10
elif meta.datatype == SQLDataType.TIME:
vtype = datetime.time
dispsize = 11
elif meta.datatype == SQLDataType.ARRAY:
vtype = list
dispsize = -1
elif meta.datatype == SQLDataType.BOOLEAN:
vtype = bool
dispsize = 5
else:
vtype = None
dispsize = -1
desc.append(tuple([meta.field if meta.field == meta.alias else meta.alias,
vtype, dispsize, meta.length, precision,
scale, meta.nullable]))
self._stmt._desc = tuple(desc)
return self._stmt._desc
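# Illustrative sketch (comment only): using `description` to build a list of
# dictionaries keyed by column name (DESCRIPTION item 0 is the column name).
#
#     cur.execute('select * from EMP')
#     col_names = [d[0] for d in cur.description]
#     records = [dict(zip(col_names, row)) for row in cur]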
@property
def affected_rows(self) -> int:
"""Specifies the number of rows that the last `.execute` or `.open`
produced (for DQL statements like select) or affected (for DML statements
like update or insert).
The attribute is -1 in case no statement was executed on the cursor
or the rowcount of the last operation is not determinable by the interface.
Note:
The database engine's own support for the determination of
“rows affected”/”rows selected” is quirky. The database engine only
supports the determination of rowcount for INSERT, UPDATE, DELETE,
and SELECT statements. When stored procedures become involved, row
count figures are usually not available to the client.
"""
if self._stmt is None:
return -1
result = -1
if (self._executed and self._stmt.type in [StatementType.SELECT,
StatementType.INSERT,
StatementType.UPDATE,
StatementType.DELETE]):
info = create_string_buffer(64)
self._stmt._istmt.get_info(bytes([23, 1]), info) # bytes(isc_info_sql_records, isc_info_end)
if ord(info[0]) != 23: # pragma: no cover
raise InterfaceError("Cursor.affected_rows:\n"
"first byte must be 'isc_info_sql_records'")
res_walk = 3
while ord(info[res_walk]) != isc_info_end:
cur_count_type = ord(info[res_walk])
res_walk += 1
size = (0).from_bytes(info[res_walk:res_walk + 2], 'little')
res_walk += 2
count = (0).from_bytes(info[res_walk:res_walk + size], 'little')
if ((cur_count_type == 13 and self._stmt.type == StatementType.SELECT)
or (cur_count_type == 14 and self._stmt.type == StatementType.INSERT)
or (cur_count_type == 15 and self._stmt.type == StatementType.UPDATE)
or (cur_count_type == 16 and self._stmt.type == StatementType.DELETE)):
result = count
res_walk += size
return result
rowcount = affected_rows
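# Illustrative sketch (comment only): checking how many rows a DML statement touched.
# Table/column names are assumptions.
#
#     cur.execute('update EMP set SALARY = SALARY * 1.1 where DEPT = ?', [10])
#     print(cur.affected_rows)   # or cur.rowcount, the DB API 2.0 alias
#     con.commit()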
@property
def transaction(self) -> TransactionManager:
"""Transaction manager associated with cursor.
"""
return self._transaction
@property
def name(self) -> str:
"""Name set for cursor.
"""
return self._name
class ServerInfoProvider(InfoProvider):
"""Provides access to information about attached server.
Important:
Do NOT create instances of this class directly! Use the `Server.info` property to access
the instance already bound to the connected server.
"""
def __init__(self, charset: str, server: Server):
super().__init__(charset)
self._srv: Server = weakref.ref(server)
# Get Firebird engine version
self.__version = _engine_version_provider.get_server_version(self._srv)
x = self.__version.split('.')
self.__engine_version = float(f'{x[0]}.{x[1]}')
def _close(self) -> None:
"""Drops the association with attached server.
"""
self._srv = None
def _acquire(self, request: bytes) -> None:
"""Acquires information from associated attachment. Information is stored in native
format in `response` buffer.
Arguments:
request: Data specifying the required information.
"""
self._srv()._svc.query(None, request, self.response.raw)
def get_info(self, info_code: SrvInfoCode) -> Any:
"""Returns requested information from connected server.
Arguments:
info_code: A code specifying the required information.
Returns:
The data type of returned value depends on information required.
"""
if info_code in self._cache:
return self._cache[info_code]
self.response.clear()
request = bytes([info_code])
self._get_data(request)
tag = self.response.get_tag()
if (tag != info_code.value):
if tag == isc_info_error: # pragma: no cover
raise InterfaceError("An error response was received")
else: # pragma: no cover
raise InterfaceError("Result code does not match request code")
#
if info_code in (SrvInfoCode.VERSION, SrvInfoCode.CAPABILITIES, SrvInfoCode.RUNNING):
result = self.response.read_int()
elif info_code in (SrvInfoCode.SERVER_VERSION, SrvInfoCode.IMPLEMENTATION,
SrvInfoCode.GET_ENV, SrvInfoCode.GET_ENV_MSG,
SrvInfoCode.GET_ENV_LOCK, SrvInfoCode.USER_DBPATH):
result = self.response.read_sized_string(encoding=self._srv().encoding)
elif info_code == SrvInfoCode.SRV_DB_INFO:
num_attachments = -1
databases = []
while not self.response.is_eof():
tag = self.response.get_tag()
if tag == SrvInfoCode.TIMEOUT:
return None
elif tag == SrvDbInfoOption.ATT:
num_attachments = self.response.read_short()
elif tag == SPBItem.DBNAME:
databases.append(self.response.read_sized_string(encoding=self._srv().encoding))
elif tag == SrvDbInfoOption.DB:
self.response.read_short()
result = (num_attachments, databases)
if self.response.get_tag() != isc_info_end: # pragma: no cover
raise InterfaceError("Malformed result buffer (missing isc_info_end item)")
# cache
if info_code in (SrvInfoCode.SERVER_VERSION, SrvInfoCode.VERSION,
SrvInfoCode.IMPLEMENTATION, SrvInfoCode.GET_ENV,
SrvInfoCode.USER_DBPATH, SrvInfoCode.GET_ENV_LOCK,
SrvInfoCode.GET_ENV_MSG, SrvInfoCode.CAPABILITIES):
self._cache[info_code] = result
return result
def get_log(self, callback: CB_OUTPUT_LINE=None) -> None:
"""Request content of Firebird Server log. **(ASYNC service)**
Arguments:
callback: Function to call back with each output line.
"""
assert self._srv()._svc is not None
self._srv()._reset_output()
self._srv()._svc.start(bytes([ServerAction.GET_FB_LOG]))
if callback:
for line in self._srv():
callback(line)
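# Illustrative sketch (comment only): reading the server log. Assumes a `Server`
# instance `srv` (e.g. obtained via firebird.driver.connect_server) that exposes this
# provider as `srv.info`.
#
#     srv.info.get_log(callback=print)   # stream lines through a callback
#     # or, without a callback, drain the output by iterating the server object:
#     srv.info.get_log()
#     log_lines = list(srv)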
@property
def version(self) -> str:
"""Firebird version as SEMVER string.
"""
return self.__version
@property
def engine_version(self) -> float:
"""Firebird version as <major>.<minor> float number.
"""
return self.__engine_version
@property
def manager_version(self) -> int:
"""Service manager version.
"""
return self.get_info(SrvInfoCode.VERSION)
@property
def architecture(self) -> str:
"""Server implementation description.
"""
return self.get_info(SrvInfoCode.IMPLEMENTATION)
@property
def home_directory(self) -> str:
"""Server home directory.
"""
return self.get_info(SrvInfoCode.GET_ENV)
@property
def security_database(self) -> str:
"""Path to security database.
"""
return self.get_info(SrvInfoCode.USER_DBPATH)
@property
def lock_directory(self) -> str:
"""Directory with lock file(s).
"""
return self.get_info(SrvInfoCode.GET_ENV_LOCK)
@property
def message_directory(self) -> str:
"""Directory with message file(s).
"""
return self.get_info(SrvInfoCode.GET_ENV_MSG)
@property
def capabilities(self) -> ServerCapability:
"""Server capabilities.
"""
return ServerCapability(self.get_info(SrvInfoCode.CAPABILITIES))
@property
def connection_count(self) -> int:
"""Number of database attachments.
"""
return self.get_info(SrvInfoCode.SRV_DB_INFO)[0]
@property
def attached_databases(self) -> List[str]:
"""List of attached databases.
"""
return self.get_info(SrvInfoCode.SRV_DB_INFO)[1]
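# Illustrative sketch (comment only): typical use of this provider. Assumes a `Server`
# instance `srv` that exposes it as `srv.info`.
#
#     print(srv.info.version)            # SEMVER string, e.g. '5.0.1'
#     print(srv.info.architecture)       # implementation description
#     print(srv.info.home_directory)
#     print(srv.info.attached_databases)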
class ServerServiceProvider:
"""Base class for server service providers.
"""
def __init__(self, server: Server):
self._srv: Server = weakref.ref(server)
def _close(self) -> None:
self._srv = None
class ServerDbServices3(ServerServiceProvider):
"""Database-related actions and services [Firebird 3+].
"""
def get_statistics(self, *, database: FILESPEC,
flags: SrvStatFlag=SrvStatFlag.DEFAULT, role: str=None,
tables: Sequence[str]=None, callback: CB_OUTPUT_LINE=None) -> None:
"""Return database statistics produced by gstat utility. **(ASYNC service)**
Arguments:
database: Database specification or alias.
flags: Flags indicating which statistics shall be collected.
role: SQL ROLE name passed to gstat.
tables: List of database tables whose statistics are to be collected.
callback: Function to call back with each output line.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.DB_STATS)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, flags)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
if tables is not None:
for table in tables:
spb.insert_string(64, table, encoding=self._srv().encoding) # isc_spb_sts_table = 64
self._srv()._svc.start(spb.get_buffer())
if callback:
for line in self._srv():
callback(line)
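# Illustrative sketch (comment only): gstat-style statistics for one database.
# Assumes `srv.database` exposes this provider, SrvStatFlag is importable from
# firebird.driver, and the alias 'employee' exists.
#
#     from firebird.driver import SrvStatFlag
#     srv.database.get_statistics(database='employee',
#                                 flags=SrvStatFlag.DATA_PAGES | SrvStatFlag.IDX_PAGES,
#                                 callback=print)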
def backup(self, *, database: FILESPEC, backup: Union[FILESPEC, Sequence[FILESPEC]],
backup_file_sizes: Sequence[int]=(),
flags: SrvBackupFlag=SrvBackupFlag.NONE, role: str=None,
callback: CB_OUTPUT_LINE=None, stats: str=None,
verbose: bool=False, skip_data: str=None, include_data: str=None,
keyhoder: str=None, keyname: str=None, crypt: str=None) -> None:
"""Request logical (GBAK) database backup. **(ASYNC service)**
Arguments:
database: Database file specification or alias.
backup: Backup filespec, or list of backup file specifications.
backup_file_sizes: List of file sizes for backup files.
flags: Backup options.
role: SQL ROLE name passed to gbak.
callback: Function to call back with each output line.
stats: Backup statistic options (TDWR).
verbose: Whether output should be verbose or not.
skip_data: String with table names whose data should be excluded from backup.
include_data: String with table names whose data should be included into backup [Firebird 4].
keyholder: Keyholder name [Firebird 4]
keyname: Key name [Firebird 4]
crypt: Encryption specification [Firebird 4]
"""
if isinstance(backup, (str, Path)):
backup = [backup]
assert len(backup_file_sizes) == 0
else:
assert len(backup) >= 1
assert len(backup) - 1 == len(backup_file_sizes)
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.BACKUP)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
for filename, size in itertools.zip_longest(backup, backup_file_sizes):
spb.insert_string(SrvBackupOption.FILE, str(filename), encoding=self._srv().encoding)
if size is not None:
spb.insert_int(SrvBackupOption.LENGTH, size)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
if skip_data is not None:
spb.insert_string(SrvBackupOption.SKIP_DATA, skip_data)
if include_data is not None:
spb.insert_string(SrvBackupOption.INCLUDE_DATA, include_data)
if keyhoder is not None:
spb.insert_string(SrvBackupOption.KEYHOLDER, keyhoder)
if keyname is not None:
spb.insert_string(SrvBackupOption.KEYNAME, keyname)
if crypt is not None:
spb.insert_string(SrvBackupOption.CRYPT, crypt)
spb.insert_int(SPBItem.OPTIONS, flags)
if verbose:
spb.insert_tag(SPBItem.VERBOSE)
if stats:
spb.insert_string(SrvBackupOption.STAT, stats)
self._srv()._svc.start(spb.get_buffer())
if callback:
for line in self._srv():
callback(line)
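# Illustrative sketch (comment only): logical backup to a single file with verbose
# output. The database alias and the backup file name are assumptions.
#
#     srv.database.backup(database='employee', backup='/backups/employee.fbk',
#                         verbose=True, callback=print)
#     # without a callback the verbose output can be drained later:
#     #     report = list(srv)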
def restore(self, *, backup: Union[FILESPEC, Sequence[FILESPEC]],
database: Union[FILESPEC, Sequence[FILESPEC]],
db_file_pages: Sequence[int]=(),
flags: SrvRestoreFlag=SrvRestoreFlag.CREATE, role: str=None,
callback: CB_OUTPUT_LINE=None, stats: str=None,
verbose: bool=True, skip_data: str=None, page_size: int=None,
buffers: int=None, access_mode: DbAccessMode=DbAccessMode.READ_WRITE,
include_data: str=None, keyhoder: str=None, keyname: str=None,
crypt: str=None, replica_mode: ReplicaMode=None) -> None:
"""Request database restore from logical (GBAK) backup. **(ASYNC service)**
Arguments:
backup: Backup filespec, or list of backup file specifications.
database: Database specification or alias, or list of those.
db_file_pages: List of database file sizes (in pages).
flags: Restore options.
role: SQL ROLE name passed to gbak.
callback: Function to call back with each output line.
stats: Restore statistic options (TDWR).
verbose: Whether output should be verbose or not.
skip_data: String with table names whose data should be excluded from restore.
page_size: Page size for restored database.
buffers: Cache size for restored database.
access_mode: Restored database access mode (R/W or R/O).
include_data: String with table names whose data should be included into backup [Firebird 4].
keyholder: Keyholder name [Firebird 4]
keyname: Key name [Firebird 4]
crypt: Encryption specification [Firebird 4]
replica_mode: Replica mode for restored database [Firebird 4]
"""
if isinstance(backup, (str, Path)):
backup = [backup]
if isinstance(database, (str, Path)):
database = [database]
assert len(db_file_pages) == 0
else:
assert len(database) >= 1
assert len(database) - 1 == len(db_file_pages)
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.RESTORE)
for filename in backup:
spb.insert_string(SrvRestoreOption.FILE, str(filename), encoding=self._srv().encoding)
for filename, size in itertools.zip_longest(database, db_file_pages):
spb.insert_string(SPBItem.DBNAME, str(filename), encoding=self._srv().encoding)
if size is not None:
spb.insert_int(SrvRestoreOption.LENGTH, size)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
if page_size is not None:
spb.insert_int(SrvRestoreOption.PAGE_SIZE, page_size)
if buffers is not None:
spb.insert_int(SrvRestoreOption.BUFFERS, buffers)
spb.insert_bytes(SrvRestoreOption.ACCESS_MODE, bytes([access_mode]))
if skip_data is not None:
spb.insert_string(SrvRestoreOption.SKIP_DATA, skip_data, encoding=self._srv().encoding)
if include_data is not None:
spb.insert_string(SrvRestoreOption.INCLUDE_DATA, include_data, encoding=self._srv().encoding)
if keyhoder is not None:
spb.insert_string(SrvRestoreOption.KEYHOLDER, keyhoder)
if keyname is not None:
spb.insert_string(SrvRestoreOption.KEYNAME, keyname)
if crypt is not None:
spb.insert_string(SrvRestoreOption.CRYPT, crypt)
if replica_mode is not None:
spb.insert_int(SrvRestoreOption.REPLICA_MODE, replica_mode.value)
spb.insert_int(SPBItem.OPTIONS, flags)
if verbose:
spb.insert_tag(SPBItem.VERBOSE)
if stats:
spb.insert_string(SrvRestoreOption.STAT, stats)
self._srv()._svc.start(spb.get_buffer())
if callback:
for line in self._srv():
callback(line)
def local_backup(self, *, database: FILESPEC, backup_stream: BinaryIO,
flags: SrvBackupFlag=SrvBackupFlag.NONE, role: str=None,
skip_data: str=None, include_data: str=None, keyhoder: str=None,
keyname: str=None, crypt: str=None) -> None:
"""Request logical (GBAK) database backup into local byte stream. **(SYNC service)**
Arguments:
database: Database specification or alias.
backup_stream: Binary stream to which the backup is to be written.
flags: Backup options.
role: SQL ROLE name passed to gbak.
skip_data: String with table names whose data should be excluded from backup.
include_data: String with table names whose data should be included into backup [Firebird 4].
keyholder: Keyholder name [Firebird 4]
keyname: Key name [Firebird 4]
crypt: Encryption specification [Firebird 4]
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.BACKUP)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
spb.insert_string(SrvBackupOption.FILE, 'stdout')
spb.insert_int(SPBItem.OPTIONS, flags)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
if skip_data is not None:
spb.insert_string(SrvBackupOption.SKIP_DATA, skip_data,
encoding=self._srv().encoding)
if include_data is not None:
spb.insert_string(SrvBackupOption.INCLUDE_DATA, include_data,
encoding=self._srv().encoding)
if keyhoder is not None:
spb.insert_string(SrvBackupOption.KEYHOLDER, keyhoder)
if keyname is not None:
spb.insert_string(SrvBackupOption.KEYNAME, keyname)
if crypt is not None:
spb.insert_string(SrvBackupOption.CRYPT, crypt)
self._srv()._svc.start(spb.get_buffer())
while not self._srv()._eof:
backup_stream.write(self._srv()._read_next_binary_output())
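# Illustrative sketch (comment only): backing up straight into a local file through a
# byte stream (no backup file is written on the server side). Names are assumptions.
#
#     with open('/backups/employee.fbk', 'wb') as f:
#         srv.database.local_backup(database='employee', backup_stream=f)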
def local_restore(self, *, backup_stream: BinaryIO,
database: Union[FILESPEC, Sequence[FILESPEC]],
db_file_pages: Sequence[int]=(),
flags: SrvRestoreFlag=SrvRestoreFlag.CREATE, role: str=None,
skip_data: str=None, page_size: int=None, buffers: int=None,
access_mode: DbAccessMode=DbAccessMode.READ_WRITE,
include_data: str=None, keyhoder: str=None, keyname: str=None,
crypt: str=None, replica_mode: ReplicaMode=None) -> None:
"""Request database restore from logical (GBAK) backup stored in local byte stream.
**(SYNC service)**
Arguments:
backup_stream: Binary stream with the backup.
database: Database specification or alias, or list of those.
db_file_pages: List of database file sizes (in pages).
flags: Restore options.
role: SQL ROLE name passed to gbak.
skip_data: String with table names whose data should be excluded from restore.
page_size: Page size for restored database.
buffers: Cache size for restored database.
access_mode: Restored database access mode (R/W or R/O).
include_data: String with table names whose data should be included into backup [Firebird 4].
keyholder: Keyholder name [Firebird 4]
keyname: Key name [Firebird 4]
crypt: Encryption specification [Firebird 4]
replica_mode: Replica mode for restored database [Firebird 4]
"""
if isinstance(database, (str, Path)):
database = [database]
assert len(db_file_pages) == 0
else:
assert len(database) >= 1
assert len(database) - 1 == len(db_file_pages)
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.RESTORE)
spb.insert_string(SrvRestoreOption.FILE, 'stdin')
for filename, size in itertools.zip_longest(database, db_file_pages):
spb.insert_string(SPBItem.DBNAME, str(filename), encoding=self._srv().encoding)
if size is not None:
spb.insert_int(SrvRestoreOption.LENGTH, size)
if page_size is not None:
spb.insert_int(SrvRestoreOption.PAGE_SIZE, page_size)
if buffers is not None:
spb.insert_int(SrvRestoreOption.BUFFERS, buffers)
spb.insert_bytes(SrvRestoreOption.ACCESS_MODE, bytes([access_mode]))
if skip_data is not None:
spb.insert_string(SrvRestoreOption.SKIP_DATA, skip_data,
encoding=self._srv().encoding)
if include_data is not None:
spb.insert_string(SrvRestoreOption.INCLUDE_DATA, include_data,
encoding=self._srv().encoding)
if keyhoder is not None:
spb.insert_string(SrvRestoreOption.KEYHOLDER, keyhoder)
if keyname is not None:
spb.insert_string(SrvRestoreOption.KEYNAME, keyname)
if crypt is not None:
spb.insert_string(SrvRestoreOption.CRYPT, crypt)
if replica_mode is not None:
spb.insert_int(SrvRestoreOption.REPLICA_MODE, replica_mode.value)
spb.insert_int(SPBItem.OPTIONS, flags)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
self._srv()._svc.start(spb.get_buffer())
#
request_length = 0
line = ''
keep_going = True
while keep_going:
no_data = False
self._srv().response.clear()
if request_length > 0:
request_length = min([request_length, 65500])
raw = backup_stream.read(request_length)
send = b''.join([SrvInfoCode.LINE.to_bytes(1, 'little'),
len(raw).to_bytes(2, 'little'), raw,
isc_info_end.to_bytes(1, 'little')])
else:
send = None
self._srv()._svc.query(send, bytes([SrvInfoCode.STDIN, SrvInfoCode.LINE]),
self._srv().response.raw)
tag = self._srv().response.get_tag()
while tag != isc_info_end:
if tag == SrvInfoCode.STDIN:
request_length = self._srv().response.read_int()
elif tag == SrvInfoCode.LINE:
line = self._srv().response.read_sized_string(encoding=self._srv().encoding)
elif tag == isc_info_data_not_ready:
no_data = True
else: # pragma: no cover
raise InterfaceError(f"Service responded with error code: {tag}")
tag = self._srv().response.get_tag()
keep_going = no_data or request_length != 0 or len(line) > 0
def nbackup(self, *, database: FILESPEC, backup: FILESPEC, level: int=0,
direct: bool=None, flags: SrvNBackupFlag=SrvNBackupFlag.NONE,
role: str=None, guid: str=None) -> None:
"""Perform physical (NBACKUP) database backup. **(SYNC service)**
Arguments:
database: Database specification or alias.
backup: Backup file specification.
level: Backup level.
direct: Direct I/O override.
flags: Backup options.
role: SQL ROLE name passed to nbackup.
guid: Database backup GUID.
Important:
Parameters `level` and `guid` are mutually exclusive. If `guid` is specified,
then `level` value is ignored.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.NBAK)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
spb.insert_string(SrvNBackupOption.FILE, str(backup), encoding=self._srv().encoding)
if guid is not None:
spb.insert_string(SrvNBackupOption.GUID, guid)
else:
spb.insert_int(SrvNBackupOption.LEVEL, level)
if direct is not None:
spb.insert_string(SrvNBackupOption.DIRECT, 'ON' if direct else 'OFF')
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, flags)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def nrestore(self, *, backups: Sequence[FILESPEC], database: FILESPEC,
direct: bool=False, flags: SrvNBackupFlag=SrvNBackupFlag.NONE,
role: str=None) -> None:
"""Perform restore from physical (NBACKUP) database backup. **(SYNC service)**
Arguments:
backups: Backup file(s) specification.
database: Database specification or alias.
direct: Direct I/O override.
flags: Restore options.
role: SQL ROLE name passed to nbackup.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.NREST)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
for backup in backups:
spb.insert_string(SrvNBackupOption.FILE, str(backup), encoding=self._srv().encoding)
if direct is not None:
spb.insert_string(SrvNBackupOption.DIRECT, 'ON' if direct else 'OFF')
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, flags)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
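# Illustrative sketch (comment only): incremental physical backup and its restore.
# File names, alias, and levels are assumptions.
#
#     srv.database.nbackup(database='employee', backup='/backups/emp.nbk0', level=0)
#     srv.database.nbackup(database='employee', backup='/backups/emp.nbk1', level=1)
#     # restore must list the whole increment chain, level 0 first:
#     srv.database.nrestore(backups=['/backups/emp.nbk0', '/backups/emp.nbk1'],
#                           database='/data/employee_restored.fdb')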
def set_default_cache_size(self, *, database: FILESPEC, size: int, role: str=None) -> None:
"""Set individual page cache size for database.
Arguments:
database: Database specification or alias.
size: New value.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SrvPropertiesOption.PAGE_BUFFERS, size)
self._srv()._svc.start(spb.get_buffer())
def set_sweep_interval(self, *, database: FILESPEC, interval: int, role: str=None) -> None:
"""Set database sweep interval.
Arguments:
database: Database specification or alias.
interval: New value.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SrvPropertiesOption.SWEEP_INTERVAL, interval)
self._srv()._svc.start(spb.get_buffer())
def set_space_reservation(self, *, database: FILESPEC, mode: DbSpaceReservation,
role: str=None) -> None:
"""Set space reservation for database.
Arguments:
database: Database specification or alias.
mode: New value.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_bytes(SrvPropertiesOption.RESERVE_SPACE,
bytes([mode]))
self._srv()._svc.start(spb.get_buffer())
def set_write_mode(self, *, database: FILESPEC, mode: DbWriteMode, role: str=None) -> None:
"""Set database write mode (SYNC/ASYNC).
Arguments:
database: Database specification or alias.
mode: New value.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_bytes(SrvPropertiesOption.WRITE_MODE,
bytes([mode]))
self._srv()._svc.start(spb.get_buffer())
def set_access_mode(self, *, database: FILESPEC, mode: DbAccessMode, role: str=None) -> None:
"""Set database access mode (R/W or R/O).
Arguments:
database: Database specification or alias.
mode: New value.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_bytes(SrvPropertiesOption.ACCESS_MODE,
bytes([mode]))
self._srv()._svc.start(spb.get_buffer())
def set_sql_dialect(self, *, database: FILESPEC, dialect: int, role: str=None) -> None:
"""Set database SQL dialect.
Arguments:
database: Database specification or alias.
dialect: New value.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SrvPropertiesOption.SET_SQL_DIALECT, dialect)
self._srv()._svc.start(spb.get_buffer())
def activate_shadow(self, *, database: FILESPEC, role: str=None) -> None:
"""Activate database shadow.
Arguments:
database: Database specification or alias.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, SrvPropertiesFlag.ACTIVATE)
self._srv()._svc.start(spb.get_buffer())
def no_linger(self, *, database: FILESPEC, role: str=None) -> None:
"""Set one-off override for database linger.
Arguments:
database: Database specification or alias.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, SrvPropertiesFlag.NOLINGER)
self._srv()._svc.start(spb.get_buffer())
def shutdown(self, *, database: FILESPEC, mode: ShutdownMode,
method: ShutdownMethod, timeout: int, role: str=None) -> None:
"""Database shutdown.
Arguments:
database: Database specification or alias.
mode: Shutdown mode.
method: Shutdown method.
timeout: Timeout for shutdown.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_bytes(SrvPropertiesOption.SHUTDOWN_MODE, bytes([mode]))
spb.insert_int(method, timeout)
self._srv()._svc.start(spb.get_buffer())
def bring_online(self, *, database: FILESPEC, mode: OnlineMode=OnlineMode.NORMAL,
role: str=None) -> None:
"""Bring previously shut down database back online.
Arguments:
database: Database specification or alias.
mode: Online mode.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.PROPERTIES)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_bytes(SrvPropertiesOption.ONLINE_MODE, bytes([mode]))
self._srv()._svc.start(spb.get_buffer())
def sweep(self, *, database: FILESPEC, role: str=None) -> None:
"""Perform database sweep operation.
Arguments:
database: Database specification or alias.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.REPAIR)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, SrvRepairFlag.SWEEP_DB)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def repair(self, *, database: FILESPEC, flags: SrvRepairFlag=SrvRepairFlag.REPAIR,
               role: str=None) -> None:
"""Perform database repair operation. **(SYNC service)**
Arguments:
database: Database specification or alias.
flags: Repair flags.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.REPAIR)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SPBItem.OPTIONS, flags)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def validate(self, *, database: FILESPEC, include_table: str=None,
exclude_table: str=None, include_index: str=None,
exclude_index: str=None, lock_timeout: int=None, role: str=None,
callback: CB_OUTPUT_LINE=None) -> None:
"""Perform database validation. **(ASYNC service)**
Arguments:
database: Database specification or alias.
include_table: Regex pattern for table names to include in validation run.
exclude_table: Regex pattern for table names to exclude in validation run.
include_index: Regex pattern for index names to include in validation run.
exclude_index: Regex pattern for index names to exclude in validation run.
lock_timeout: Lock timeout (seconds), used to acquire locks for table to validate,
default is 10 secs. 0 is no-wait, -1 is infinite wait.
role: SQL ROLE name passed to gfix.
callback: Function to call back with each output line.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.VALIDATE)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if include_table is not None:
spb.insert_string(SrvValidateOption.INCLUDE_TABLE, include_table,
encoding=self._srv().encoding)
if exclude_table is not None:
spb.insert_string(SrvValidateOption.EXCLUDE_TABLE, exclude_table,
encoding=self._srv().encoding)
if include_index is not None:
spb.insert_string(SrvValidateOption.INCLUDE_INDEX, include_index,
encoding=self._srv().encoding)
if exclude_index is not None:
spb.insert_string(SrvValidateOption.EXCLUDE_INDEX, exclude_index,
encoding=self._srv().encoding)
if lock_timeout is not None:
spb.insert_int(SrvValidateOption.LOCK_TIMEOUT, lock_timeout)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
self._srv()._svc.start(spb.get_buffer())
if callback:
for line in self._srv():
callback(line)
def get_limbo_transaction_ids(self, *, database: FILESPEC) -> List[int]:
"""Returns list of transactions in limbo.
Arguments:
database: Database specification or alias.
"""
#raise NotImplementedError
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
            spb.insert_tag(ServerAction.REPAIR)
spb.insert_string(SPBItem.DBNAME, str(database))
spb.insert_int(SPBItem.OPTIONS, SrvRepairFlag.LIST_LIMBO_TRANS)
self._srv()._svc.start(spb.get_buffer())
self._srv()._reset_output()
self._srv()._fetch_complex_info(bytes([SrvInfoCode.LIMBO_TRANS]))
trans_ids = []
while not self._srv().response.is_eof():
tag = self._srv().response.get_tag()
if tag == SrvInfoCode.TIMEOUT:
return None
elif tag == SrvInfoCode.LIMBO_TRANS:
size = self._srv().response.read_short()
while not self._srv().response.is_eof() and self._srv().response.pos < size:
tag = self._srv().response.get_tag()
if tag == SvcRepairOption.TRA_HOST_SITE:
site = self._srv().response.get_string()
elif tag == SvcRepairOption.TRA_STATE:
tag = self._srv().response.get_tag()
if tag == SvcRepairOption.TRA_STATE_LIMBO:
state = TransactionState.LIMBO
elif tag == SvcRepairOption.TRA_STATE_COMMIT:
state = TransactionState.COMMIT
elif tag == SvcRepairOption.TRA_STATE_ROLLBACK:
state = TransactionState.ROLLBACK
elif tag == SvcRepairOption.TRA_STATE_UNKNOWN:
state = TransactionState.UNKNOWN
else:
raise InterfaceError(f"Unknown transaction state {tag}")
elif tag == SvcRepairOption.TRA_REMOTE_SITE:
remote_site = self._srv().response.get_string()
elif tag == SvcRepairOption.TRA_DB_PATH:
db_path = self._srv().response.get_string()
elif tag == SvcRepairOption.TRA_ADVISE:
tag = self._srv().response.get_tag()
if tag == SvcRepairOption.TRA_ADVISE_COMMIT:
advise = TransactionState.COMMIT
elif tag == SvcRepairOption.TRA_ADVISE_ROLLBACK:
advise = TransactionState.ROLLBACK
elif tag == SvcRepairOption.TRA_ADVISE_UNKNOWN:
advise = TransactionState.UNKNOWN
else:
raise InterfaceError(f"Unknown transaction state {tag}")
elif tag == SvcRepairOption.MULTI_TRA_ID:
multi_id = self._srv().response.get_int()
elif tag == SvcRepairOption.SINGLE_TRA_ID:
single_id = self._srv().response.get_int()
elif tag == SvcRepairOption.TRA_ID:
tra_id = self._srv().response.get_int()
elif tag == SvcRepairOption.MULTI_TRA_ID_64:
multi_id = self._srv().response.get_int64()
elif tag == SvcRepairOption.SINGLE_TRA_ID_64:
single_id = self._srv().response.get_int64()
elif tag == SvcRepairOption.TRA_ID_64:
tra_id = self._srv().response.get_int64()
else:
                        raise InterfaceError(f"Unknown limbo transaction clumplet tag {tag}")
trans_ids.append(None)
if self._srv().response.get_tag() != isc_info_end:
raise InterfaceError("Malformed result buffer (missing isc_info_end item)")
return trans_ids
def commit_limbo_transaction(self, *, database: FILESPEC, transaction_id: int) -> None:
"""Resolve limbo transaction with commit.
Arguments:
database: Database specification or alias.
transaction_id: ID of Transaction to resolve.
"""
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.REPAIR)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if transaction_id <= USHRT_MAX:
spb.insert_int(SrvRepairOption.COMMIT_TRANS, transaction_id)
else:
spb.insert_bigint(SrvRepairOption.COMMIT_TRANS_64, transaction_id)
self._srv()._svc.start(spb.get_buffer())
self._srv()._read_all_binary_output()
def rollback_limbo_transaction(self, *, database: FILESPEC, transaction_id: int) -> None:
"""Resolve limbo transaction with rollback.
Arguments:
database: Database specification or alias.
transaction_id: ID of Transaction to resolve.
"""
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.REPAIR)
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if transaction_id <= USHRT_MAX:
spb.insert_int(SrvRepairOption.ROLLBACK_TRANS, transaction_id)
else:
spb.insert_bigint(SrvRepairOption.ROLLBACK_TRANS_64, transaction_id)
self._srv()._svc.start(spb.get_buffer())
self._srv()._read_all_binary_output()
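# A minimal, hedged usage sketch for the database services defined above, driven through a
# `Server` obtained from `connect_server()` (defined later in this module). The host, the
# SYSDBA credentials and the 'employee' alias are illustrative assumptions, not values taken
# from this code; the helper is never called by the library itself.
def _example_db_services():
    with connect_server('localhost', user='SYSDBA', password='masterkey') as srv:
        srv.database.set_sweep_interval(database='employee', interval=10000)
        srv.database.sweep(database='employee')  # SYNC service: blocks until the sweep finishes
        limbo = srv.database.get_limbo_transaction_ids(database='employee')
        print('transactions in limbo:', limbo)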
class ServerDbServices(ServerDbServices3):
"""Database-related actions and services [Firebird 4+].
"""
def nfix_database(self, *, database: FILESPEC, role: str=None,
flags: SrvNBackupFlag=SrvNBackupFlag.NONE) -> None:
"""Fixup database after filesystem copy.
Arguments:
database: Database specification or alias.
role: SQL ROLE name passed to nbackup.
flags: Backup options.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_tag(ServerAction.NFIX)
spb.insert_int(SPBItem.OPTIONS, flags)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def set_replica_mode(self, *, database: FILESPEC, mode: ReplicaMode, role: str=None) -> None:
"""Manage replica database.
Arguments:
database: Database specification or alias.
mode: New replication mode.
role: SQL ROLE name passed to gfix.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, role, encoding=self._srv().encoding)
spb.insert_int(SrvPropertiesOption.REPLICA_MODE, mode.value)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
class ServerUserServices(ServerServiceProvider):
"""User-related actions and services.
"""
def __fetch_users(self, data: Buffer) -> List[UserInfo]:
users = []
user = {}
while not data.is_eof():
tag = data.get_tag()
if tag == SrvUserOption.USER_NAME:
if user:
users.append(UserInfo(**user))
user.clear()
user['user_name'] = data.read_sized_string(encoding=self._srv().encoding)
elif tag == SrvUserOption.USER_ID:
user['user_id'] = data.read_int()
elif tag == SrvUserOption.GROUP_ID:
user['group_id'] = data.read_int()
elif tag == SrvUserOption.PASSWORD: # pragma: no cover
user['password'] = data.read_bytes()
elif tag == SrvUserOption.GROUP_NAME: # pragma: no cover
user['group_name'] = data.read_sized_string(encoding=self._srv().encoding)
elif tag == SrvUserOption.FIRST_NAME:
user['first_name'] = data.read_sized_string(encoding=self._srv().encoding)
elif tag == SrvUserOption.MIDDLE_NAME:
user['middle_name'] = data.read_sized_string(encoding=self._srv().encoding)
elif tag == SrvUserOption.LAST_NAME:
user['last_name'] = data.read_sized_string(encoding=self._srv().encoding)
elif tag == SrvUserOption.ADMIN:
user['admin'] = bool(data.read_int())
else: # pragma: no cover
raise InterfaceError(f"Unrecognized result clumplet: {tag}")
if user:
users.append(UserInfo(**user))
return users
def get_all(self, *, database: FILESPEC=None, sql_role: str=None) -> List[UserInfo]:
"""Get information about users.
Arguments:
database: Database specification or alias.
sql_role: SQL role name.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.DISPLAY_USER_ADM)
if database is not None:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if sql_role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, sql_role,
encoding=self._srv().encoding)
self._srv()._svc.start(spb.get_buffer())
return self.__fetch_users(Buffer(self._srv()._read_all_binary_output()))
def get(self, user_name: str, *, database: FILESPEC=None, sql_role: str=None) -> Optional[UserInfo]:
"""Get information about user.
Arguments:
user_name: User name.
database: Database specification or alias.
sql_role: SQL role name.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.DISPLAY_USER_ADM)
if database is not None:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
spb.insert_string(SrvUserOption.USER_NAME, user_name, encoding=self._srv().encoding)
if sql_role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, sql_role, encoding=self._srv().encoding)
self._srv()._svc.start(spb.get_buffer())
users = self.__fetch_users(Buffer(self._srv()._read_all_binary_output()))
return users[0] if users else None
def add(self, *, user_name: str, password: str, user_id: int=None,
group_id: int=None, first_name: str=None, middle_name: str=None,
last_name: str=None, admin: bool=None, database: FILESPEC=None,
sql_role: str=None) -> None:
"""Add new user.
Arguments:
user_name: User name.
password: User password.
user_id: User ID.
group_id: Group ID.
            first_name: User's first name.
middle_name: User's middle name.
last_name: User's last name.
admin: Admin flag.
database: Database specification or alias.
sql_role: SQL role name.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.ADD_USER)
if database is not None:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
spb.insert_string(SrvUserOption.USER_NAME, user_name, encoding=self._srv().encoding)
if sql_role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, sql_role, encoding=self._srv().encoding)
spb.insert_string(SrvUserOption.PASSWORD, password,
encoding=self._srv().encoding)
if user_id is not None:
spb.insert_int(SrvUserOption.USER_ID, user_id)
if group_id is not None:
spb.insert_int(SrvUserOption.GROUP_ID, group_id)
if first_name is not None:
spb.insert_string(SrvUserOption.FIRST_NAME, first_name,
encoding=self._srv().encoding)
if middle_name is not None:
spb.insert_string(SrvUserOption.MIDDLE_NAME, middle_name,
encoding=self._srv().encoding)
if last_name is not None:
spb.insert_string(SrvUserOption.LAST_NAME, last_name,
encoding=self._srv().encoding)
if admin is not None:
spb.insert_int(SrvUserOption.ADMIN, 1 if admin else 0)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def update(self, user_name: str, *, password: str=None,
user_id: int=None, group_id: int=None,
first_name: str=None, middle_name: str=None,
last_name: str=None, admin: bool=None, database: FILESPEC=None) -> None:
"""Update user information.
Arguments:
user_name: User name.
password: User password.
user_id: User ID.
group_id: Group ID.
            first_name: User's first name.
            middle_name: User's middle name.
            last_name: User's last name.
            admin: Admin flag.
            database: Database specification or alias.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.MODIFY_USER)
spb.insert_string(SrvUserOption.USER_NAME, user_name,
encoding=self._srv().encoding)
if database is not None:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if password is not None:
spb.insert_string(SrvUserOption.PASSWORD, password,
encoding=self._srv().encoding)
if user_id is not None:
spb.insert_int(SrvUserOption.USER_ID, user_id)
if group_id is not None:
spb.insert_int(SrvUserOption.GROUP_ID, group_id)
if first_name is not None:
spb.insert_string(SrvUserOption.FIRST_NAME, first_name,
encoding=self._srv().encoding)
if middle_name is not None:
spb.insert_string(SrvUserOption.MIDDLE_NAME, middle_name,
encoding=self._srv().encoding)
if last_name is not None:
spb.insert_string(SrvUserOption.LAST_NAME, last_name,
encoding=self._srv().encoding)
if admin is not None:
spb.insert_int(SrvUserOption.ADMIN, 1 if admin else 0)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def delete(self, user_name: str, *, database: FILESPEC=None, sql_role: str=None) -> None:
"""Delete user.
Arguments:
user_name: User name.
database: Database specification or alias.
sql_role: SQL role name.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.DELETE_USER)
spb.insert_string(SrvUserOption.USER_NAME, user_name, encoding=self._srv().encoding)
if database is not None:
spb.insert_string(SPBItem.DBNAME, str(database), encoding=self._srv().encoding)
if sql_role is not None:
spb.insert_string(SPBItem.SQL_ROLE_NAME, sql_role, encoding=self._srv().encoding)
self._srv()._svc.start(spb.get_buffer())
self._srv().wait()
def exists(self, user_name: str, *, database: FILESPEC=None, sql_role: str=None) -> bool:
"""Returns True if user exists.
Arguments:
user_name: User name.
database: Database specification or alias.
sql_role: SQL role name.
"""
return self.get(user_name, database=database, sql_role=sql_role) is not None
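# A minimal, hedged usage sketch for the user-management services above. Host, SYSDBA
# credentials and the TMP_USER account are illustrative assumptions only; the helper is
# never called by the library itself.
def _example_user_services():
    with connect_server('localhost', user='SYSDBA', password='masterkey') as srv:
        if not srv.user.exists('TMP_USER'):
            srv.user.add(user_name='TMP_USER', password='secret', first_name='Temp')
        print([u.user_name for u in srv.user.get_all()])  # names from the security database
        srv.user.delete('TMP_USER')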
class ServerTraceServices(ServerServiceProvider):
"""Trace session actions and services.
"""
def __action(self, action: ServerAction, label: str, session_id: int) -> str:
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(action)
spb.insert_int(SrvTraceOption.ID, session_id)
self._srv()._svc.start(spb.get_buffer())
response = self._srv()._fetch_line()
if not response.startswith(f"Trace session ID {session_id} {label}"): # pragma: no cover
# response should contain the error message
raise DatabaseError(response)
return response
def start(self, *, config: str, name: str=None) -> int:
"""Start new trace session. **(ASYNC service)**
Arguments:
config: Trace session configuration.
name: Trace session name.
Returns:
Trace session ID.
"""
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.TRACE_START)
if name is not None:
spb.insert_string(SrvTraceOption.NAME, name)
spb.insert_string(SrvTraceOption.CONFIG, config, encoding=self._srv().encoding)
self._srv()._svc.start(spb.get_buffer())
response = self._srv()._fetch_line()
if response.startswith('Trace session ID'):
return int(response.split()[3])
else: # pragma: no cover
# response should contain the error message
raise DatabaseError(response)
def stop(self, *, session_id: int) -> str:
"""Stop trace session.
Arguments:
session_id: Trace session ID.
Returns:
Text message 'Trace session ID <x> stopped'.
"""
return self.__action(ServerAction.TRACE_STOP, 'stopped', session_id)
def suspend(self, *, session_id: int) -> str:
"""Suspend trace session.
Arguments:
session_id: Trace session ID.
Returns:
Text message 'Trace session ID <x> paused'.
"""
return self.__action(ServerAction.TRACE_SUSPEND, 'paused', session_id)
def resume(self, *, session_id: int) -> str:
"""Resume trace session.
Arguments:
session_id: Trace session ID.
Returns:
Text message 'Trace session ID <x> resumed'.
"""
return self.__action(ServerAction.TRACE_RESUME, 'resumed', session_id)
@property
def sessions(self) -> Dict[int, TraceSession]:
"""Dictionary with active trace sessions.
"""
def store():
if current:
session = TraceSession(**current)
result[session.id] = session
current.clear()
self._srv()._reset_output()
with a.get_api().util.get_xpb_builder(XpbKind.SPB_START) as spb:
spb.insert_tag(ServerAction.TRACE_LIST)
self._srv()._svc.start(spb.get_buffer())
result = {}
current = {}
for line in self._srv():
if not line.strip():
store()
elif line.startswith('Session ID:'):
store()
current['id'] = int(line.split(':')[1].strip())
elif line.lstrip().startswith('name:'):
current['name'] = line.split(':')[1].strip()
elif line.lstrip().startswith('user:'):
current['user'] = line.split(':')[1].strip()
elif line.lstrip().startswith('date:'):
current['timestamp'] = datetime.datetime.strptime(
line.split(':', 1)[1].strip(),
'%Y-%m-%d %H:%M:%S')
elif line.lstrip().startswith('flags:'):
current['flags'] = line.split(':')[1].strip().split(',')
else: # pragma: no cover
raise InterfaceError(f"Unexpected line in trace session list: {line}")
store()
return result
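# A minimal, hedged usage sketch for the trace services above. The trace configuration string
# must follow the server's trace-config syntax; the one below is only a plausible illustration,
# and in practice a running trace session keeps its service connection busy, so real code would
# usually stop the session from a second connection. Never called by the library itself.
def _example_trace_services():
    with connect_server('localhost', user='SYSDBA', password='masterkey') as srv:
        session_id = srv.trace.start(config='database { enabled = true }', name='demo-trace')
        print(srv.trace.sessions)  # dict of active TraceSession objects keyed by session ID
        print(srv.trace.stop(session_id=session_id))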
class Server(LoggingIdMixin):
"""Represents connection to Firebird Service Manager.
Note:
Implements context manager protocol to call `.close()` automatically.
"""
def __init__(self, svc: iService, spb: bytes, host: str, encoding: str,
encoding_errors: str):
self._svc: iService = svc
        #: Service Parameter Buffer (SPB) used to connect to the service manager
self.spb: bytes = spb
#: Server host
self.host: str = host
#: Service output mode (line or eof)
self.mode: SrvInfoCode = SrvInfoCode.TO_EOF
        #: Response buffer used to communicate with the service
self.response: CBuffer = CBuffer(USHRT_MAX)
self._eof: bool = False
self.__line_buffer: List[str] = []
#: Encoding used for text data exchange with server
self.encoding: str = encoding
#: Handler used for encoding errors. See: `codecs#error-handlers`
self.encoding_errors: str = encoding_errors
#
self.__ev: float = None
self.__info: ServerInfoProvider = None
self.__dbsvc: Union[ServerDbServices, ServerDbServices3] = None
self.__trace: ServerTraceServices = None
self.__user: ServerUserServices = None
def __enter__(self) -> Server:
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def __del__(self):
if self._svc is not None:
warn(f"Server '{self.logging_id}' disposed without prior close()", ResourceWarning)
self.close()
def __next__(self):
if (line := self.readline()) is not None:
return line
else:
raise StopIteration
def __iter__(self):
return self
def __str__(self):
return f'Server[v{self.info.version}@{self.host.replace(":service_mgr","")}]'
def _engine_version(self) -> float:
if self.__ev is None:
self.__ev = _engine_version_provider.get_engine_version(weakref.ref(self))
return self.__ev
def _reset_output(self) -> None:
self._eof = False
self.__line_buffer.clear()
def _make_request(self, timeout: int) -> bytes:
if timeout == -1:
return None
else:
return b''.join([SrvInfoCode.TIMEOUT.to_bytes(1, 'little'),
(4).to_bytes(2, 'little'),
timeout.to_bytes(4, 'little'), isc_info_end.to_bytes(1, 'little')])
def _fetch_complex_info(self, request: bytes, timeout: int=-1) -> None:
send = self._make_request(timeout)
self.response.clear()
self._svc.query(send, request, self.response.raw)
if self.response.is_truncated(): # pragma: no cover
            raise InterfaceError("Requested data can't fit into the largest possible buffer")
def _fetch_line(self, timeout: int=-1) -> Optional[str]:
self._fetch_complex_info(bytes([SrvInfoCode.LINE]))
result = None
while not self.response.is_eof():
tag = self.response.get_tag()
if tag == SrvInfoCode.TIMEOUT:
return None
elif tag == SrvInfoCode.LINE:
result = self.response.read_sized_string(encoding=self.encoding)
if self.response.get_tag() != isc_info_end: # pragma: no cover
raise InterfaceError("Malformed result buffer (missing isc_info_end item)")
return result
def _read_output(self, *, init: str='', timeout: int=-1) -> None:
assert self._svc is not None
self.response.clear()
self._svc.query(self._make_request(timeout), bytes([self.mode]), self.response.raw)
tag = self.response.get_tag()
if tag != self.mode: # pragma: no cover
raise InterfaceError(f"Service responded with error code: {tag}")
data = self.response.read_sized_string(encoding=self.encoding, errors=self.encoding_errors)
init += data
if data and self.mode is SrvInfoCode.LINE:
init += '\n'
self.__line_buffer = init.splitlines(keepends=True)
if self.mode is SrvInfoCode.TO_EOF:
self._eof = self.response.get_tag() == isc_info_end
else:
if self.response.get_tag() != isc_info_end: # pragma: no cover
raise InterfaceError("Malformed result buffer (missing isc_info_end item)")
self._eof = not data
def _read_all_binary_output(self, *, timeout: int=-1) -> bytes:
assert self._svc is not None
send = self._make_request(timeout)
result = []
eof = False
while not eof:
self.response.clear()
self._svc.query(send, bytes([SrvInfoCode.TO_EOF]), self.response.raw)
if (tag := self.response.get_tag()) != SrvInfoCode.TO_EOF: # pragma: no cover
raise InterfaceError(f"Service responded with error code: {tag}")
result.append(self.response.read_bytes())
eof = self.response.get_tag() == isc_info_end
return b''.join(result)
def _read_next_binary_output(self, *, timeout: int=-1) -> bytes:
assert self._svc is not None
result = None
if not self._eof:
send = self._make_request(timeout)
self.response.clear()
self._svc.query(send, bytes([SrvInfoCode.TO_EOF]), self.response.raw)
if (tag := self.response.get_tag()) != SrvInfoCode.TO_EOF: # pragma: no cover
raise InterfaceError(f"Service responded with error code: {tag}")
result = self.response.read_bytes()
self._eof = self.response.get_tag() == isc_info_end
return result
def is_running(self) -> bool:
"""Returns True if service is running.
Note:
            Some services like `~.ServerDbServices.backup()` or `~.ServerDbServices.sweep()`
            may take time to complete, so they are called asynchronously. Until they finish,
            no other asynchronous service can be started.
"""
assert self._svc is not None
return self.info.get_info(SrvInfoCode.RUNNING) > 0
def readline(self) -> Optional[str]:
"""Get next line of textual output from last service query.
"""
if self._eof and not self.__line_buffer:
return None
if not self.__line_buffer:
self._read_output()
elif len(self.__line_buffer) == 1:
line = self.__line_buffer.pop(0)
if self._eof:
return line
self._read_output(init=line)
if self.__line_buffer:
return self.__line_buffer.pop(0)
return None
def readlines(self) -> List[str]:
"""Get list of remaining output lines from last service query.
"""
return [line for line in self]
def wait(self) -> None:
"""Wait until running service completes, i.e. stops sending data.
"""
while self.is_running():
for _ in self:
pass
def close(self) -> None:
"""Close the server connection now (rather than whenever `__del__` is called).
The instance will be unusable from this point forward; an `.Error`
(or subclass) exception will be raised if any operation is attempted
with the instance.
"""
if self.__info is not None:
self.__info._close()
self.__info = None
if self.__dbsvc is not None:
self.__dbsvc._close()
self.__dbsvc = None
if self.__trace is not None:
self.__trace._close()
self.__trace = None
if self.__user is not None:
self.__user._close()
self.__user = None
if self._svc is not None:
# try..finally is necessary to shield from crashed server
# Otherwise close() will be called from __del__ which may crash Python
try:
self._svc.detach()
finally:
self._svc = None
# Properties
@property
def info(self) -> ServerInfoProvider:
"""Access to various information about attached server.
"""
if self.__info is None:
self.__info = ServerInfoProvider(self.encoding, self)
return self.__info
@property
def database(self) -> Union[ServerDbServices3, ServerDbServices]:
"""Access to various database-related actions and services.
"""
if self.__dbsvc is None:
cls = ServerDbServices if self._engine_version() >= 4.0 \
else ServerDbServices3
self.__dbsvc = cls(self)
return self.__dbsvc
@property
    def trace(self) -> ServerTraceServices:
        """Access to various trace session actions and services.
"""
if self.__trace is None:
self.__trace = ServerTraceServices(self)
return self.__trace
@property
def user(self) -> ServerUserServices:
"""Access to various user-related actions and services.
"""
if self.__user is None:
self.__user = ServerUserServices(self)
return self.__user
def connect_server(server: str, *, user: str=None, password: str=None,
crypt_callback: iCryptKeyCallbackImpl=None,
expected_db: str=None, role: str=None, encoding: str=None,
encoding_errors: str=None) -> Server:
"""Establishes a connection to server's service manager.
Arguments:
server: Server host machine or Server configuration name.
user: User name.
password: User password.
crypt_callback: Callback that provides encryption key.
        expected_db: Database that would be accessed (used when services run against a
            non-default security database).
role: SQL role used for connection.
encoding: Encoding for string values passed in parameter buffer. Default is
`.ServerConfig.encoding`.
encoding_errors: Error handler used for encoding errors. Default is
`.ServerConfig.encoding_errors`.
Hooks:
        Event `.ServerHook.ATTACHED`: Executed before the `Server` instance is
returned. Hook must have signature::
hook_func(server: Server) -> None
Any value returned by hook is ignored.
"""
srv_config = driver_config.get_server(server)
if srv_config is None:
srv_config = driver_config.server_defaults
host = server
else:
host = srv_config.host.value
if host is None:
host = 'service_mgr'
if not host.endswith('service_mgr'):
if host and not host.endswith(':'):
host += ':'
host += 'service_mgr'
if user is None:
user = srv_config.user.value
if password is None:
password = srv_config.password.value
spb = SPB_ATTACH(user=user, password=password, config=srv_config.config.value,
trusted_auth=srv_config.trusted_auth.value,
auth_plugin_list=srv_config.auth_plugin_list.value,
expected_db=expected_db, role=role)
spb_buf = spb.get_buffer()
with a.get_api().master.get_dispatcher() as provider:
if crypt_callback is not None:
provider.set_dbcrypt_callback(crypt_callback)
svc = provider.attach_service_manager(host, spb_buf)
con = Server(svc, spb_buf, host, srv_config.encoding.value if encoding is None else encoding,
srv_config.encoding_errors.value if encoding_errors is None else encoding_errors)
for hook in get_callbacks(ServerHook.ATTACHED, con):
hook(con)
return con
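# A minimal, hedged usage sketch for connect_server(): attach to the service manager, read some
# server information and let the context manager close the connection. Host and credentials are
# illustrative assumptions; the helper is never called by the library itself.
def _example_connect_server():
    with connect_server('localhost', user='SYSDBA', password='masterkey') as srv:
        print(srv.info.version)  # server version via the `info` property
        print(srv.host)          # host string, normalized to end with 'service_mgr'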
# Register hookable classes
register_class(Connection, ConnectionHook)
register_class(Server, ServerHook)
del register_class
del add_hook
|
SerialDevice.py
|
import serial
import threading
import time
class SerialDevice():
_maxNameLength = 0
def __init__(self, name, serialDevicePath):
self._serialDevicePath = serialDevicePath
self._name = name
self._rxBuf = ""
self._rxBufClearable = ""
self._port = self._openSerialPort(serialDevicePath)
self._receivedSignals = []
        # store the longest name so console printing in _rxHandler can be aligned
        if len(name) > SerialDevice._maxNameLength:
            SerialDevice._maxNameLength = len(name)
# start receiver thread
self._shouldThreadStop = False
self._lock = threading.Lock()
self._rxThread = threading.Thread(target=SerialDevice._rxHandler, args=(self,), daemon=True)
self._rxThread.start()
print("SerialDevice: Connection created for " + name)
def close(self):
""" Closes the serial transport """
self._shouldThreadStop = True
self._rxThread.join()
self._port.close()
print("SerialDevice: Connection closed for " + self._name)
def getEntireDataReceived(self):
""" Returns the entire data that was received since the connection was opened """
with self._lock:
return self._rxBuf
def getDataReceived(self):
""" Returns the data that was received since last call or since the connection was opened """
with self._lock:
result = self._rxBufClearable
self._rxBufClearable = ""
return result
def sendSynchronous(self, message):
""" Send a message synchronously
:param message: A bytes() object to send
"""
self._port.write(message)
self._port.flush()
def sendSignal(self, identifier):
""" Sends a signal synchronously. Signals can be used to synchronize host and device.
        :param identifier: a string of 1 to 32 characters
"""
if not isinstance(identifier, str):
raise TypeError("Expected a string")
if len(identifier) < 1 or len(identifier) > 32:
raise ValueError("Signal identifier must have 1..32 characters")
self.sendSynchronous(str.encode("!!>> Signal:{}\n".format(identifier)))
def wasSignalReceived(self, identifier):
""" Returns True if the requested signal was received from the device
        :param identifier: a string of 1 to 32 characters
"""
with self._lock:
return identifier in self._receivedSignals
def awaitSignal(self, identifier, timeoutInMs = 10000):
""" Blocks until the requested signal was received from the device
        :param identifier: a string of 1 to 32 characters
:param timeoutInMs: a timeout in ms before an Exception is thrown;
or 0 if signal should be awaited forever
"""
startTimeMs = time.time() * 1000
while not self.wasSignalReceived(identifier):
now = time.time() * 1000
if timeoutInMs > 0 and now - startTimeMs > timeoutInMs:
raise Exception(f"Timeout waiting for signal '{identifier}' on device '{self._name}'")
def _openSerialPort(self, serialDevicePath):
numAttemps = 0
timeoutInMs = 5000
handle = None
startTimeMs = time.time() * 1000
while not handle:
try:
handle = serial.Serial(serialDevicePath, timeout=10)
except serial.SerialException as e:
numAttemps = numAttemps + 1
now = time.time() * 1000
if timeoutInMs > 0 and now - startTimeMs > timeoutInMs:
raise Exception(f"Failed to connect to serial port '{serialDevicePath}' after {numAttemps} attempts")
time.sleep(0.25)
except Exception as e:
raise Exception(f"Exception while opening serial port '{serialDevicePath}': " + str(e))
handle.flushInput()
return handle
def _rxHandler(parent):
while not parent._shouldThreadStop:
line = parent._port.readline()
if not line:
raise Exception("Timeout reading serial device: " + str(parent._serialDevicePath))
# print the received line to the console
decodedLine = line.decode().rstrip()
alignedDeviceName = parent._name.rjust(SerialDevice._maxNameLength)
print(f"{alignedDeviceName} # {decodedLine}")
# append the received line to the parents buffers
with parent._lock:
parent._rxBuf = parent._rxBuf + decodedLine + "\n"
parent._rxBufClearable = parent._rxBufClearable + decodedLine + "\n"
# read signals from the device
if decodedLine.startswith("!!>> Signal:"):
signalIdentifier = decodedLine.removeprefix("!!>> Signal:")
with parent._lock:
if signalIdentifier not in parent._receivedSignals:
parent._receivedSignals.append(signalIdentifier)
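# A minimal, hedged usage sketch for SerialDevice: exchange a synchronization signal with a
# device that is assumed to echo signals back. The device path and signal name are illustrative
# assumptions; the helper is not called anywhere in this module.
def _example_serial_device():
    dev = SerialDevice("target", "/dev/ttyUSB0")
    dev.sendSignal("BOOT_DONE")                     # sent to the device
    dev.awaitSignal("BOOT_DONE", timeoutInMs=5000)  # blocks until the device sends it back
    print(dev.getDataReceived())                    # console output received since last call
    dev.close()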
|
test_windows_events.py
|
import os
import signal
import socket
import sys
import subprocess
import time
import threading
import unittest
from unittest import mock
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
import _overlapped
import _testcapi
import _winapi
import asyncio
from asyncio import windows_events
from asyncio.streams import _StreamProtocol
from test.test_asyncio import utils as test_utils
from test.support.script_helper import spawn_python
def tearDownModule():
asyncio.set_event_loop_policy(None)
class UpperProto(asyncio.Protocol):
def __init__(self):
self.buf = []
def connection_made(self, trans):
self.trans = trans
def data_received(self, data):
self.buf.append(data)
if b'\n' in data:
self.trans.write(b''.join(self.buf).upper())
self.trans.close()
class ProactorLoopCtrlC(test_utils.TestCase):
def test_ctrl_c(self):
def SIGINT_after_delay():
time.sleep(0.1)
signal.raise_signal(signal.SIGINT)
thread = threading.Thread(target=SIGINT_after_delay)
loop = asyncio.get_event_loop()
try:
            # only start the SIGINT thread once the event loop is running
loop.call_soon(thread.start)
loop.run_forever()
self.fail("should not fall through 'run_forever'")
except KeyboardInterrupt:
pass
finally:
self.close_loop(loop)
thread.join()
class ProactorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
def test_close(self):
a, b = socket.socketpair()
trans = self.loop._make_socket_transport(a, asyncio.Protocol())
f = asyncio.ensure_future(self.loop.sock_recv(b, 100), loop=self.loop)
trans.close()
self.loop.run_until_complete(f)
self.assertEqual(f.result(), b'')
b.close()
def test_double_bind(self):
ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid()
server1 = windows_events.PipeServer(ADDRESS)
with self.assertRaises(PermissionError):
windows_events.PipeServer(ADDRESS)
server1.close()
def test_pipe(self):
res = self.loop.run_until_complete(self._test_pipe())
self.assertEqual(res, 'done')
async def _test_pipe(self):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
[server] = await self.loop.start_serving_pipe(
UpperProto, ADDRESS)
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream = asyncio.Stream(mode=asyncio.StreamMode.READ,
loop=self.loop, _asyncio_internal=True)
protocol = _StreamProtocol(stream,
loop=self.loop,
_asyncio_internal=True)
trans, proto = await self.loop.create_pipe_connection(
lambda: protocol, ADDRESS)
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream, trans))
for i, (r, w) in enumerate(clients):
w.write('lower-{}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
response = await r.readline()
self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
w.close()
await r.close()
server.close()
with self.assertRaises(FileNotFoundError):
await self.loop.create_pipe_connection(
asyncio.Protocol, ADDRESS)
return 'done'
def test_connect_pipe_cancel(self):
exc = OSError()
exc.winerror = _overlapped.ERROR_PIPE_BUSY
with mock.patch.object(_overlapped, 'ConnectPipe',
side_effect=exc) as connect:
coro = self.loop._proactor.connect_pipe('pipe_address')
task = self.loop.create_task(coro)
# check that it's possible to cancel connect_pipe()
task.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(task)
def test_wait_for_handle(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with 0.5s timeout;
# result should be False at timeout
fut = self.loop._proactor.wait_for_handle(event, 0.5)
start = self.loop.time()
done = self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertEqual(done, False)
self.assertFalse(fut.result())
# bpo-31008: Tolerate only 450 ms (at least 500 ms expected),
# because of bad clock resolution on Windows
self.assertTrue(0.45 <= elapsed <= 0.9, elapsed)
_overlapped.SetEvent(event)
# Wait for set event;
# result should be True immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
start = self.loop.time()
done = self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertEqual(done, True)
self.assertTrue(fut.result())
self.assertTrue(0 <= elapsed < 0.3, elapsed)
# asyncio issue #195: cancelling a done _WaitHandleFuture
# must not crash
fut.cancel()
def test_wait_for_handle_cancel(self):
event = _overlapped.CreateEvent(None, True, False, None)
self.addCleanup(_winapi.CloseHandle, event)
# Wait for unset event with a cancelled future;
# CancelledError should be raised immediately
fut = self.loop._proactor.wait_for_handle(event, 10)
fut.cancel()
start = self.loop.time()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(fut)
elapsed = self.loop.time() - start
self.assertTrue(0 <= elapsed < 0.1, elapsed)
# asyncio issue #195: cancelling a _WaitHandleFuture twice
# must not crash
fut = self.loop._proactor.wait_for_handle(event)
fut.cancel()
fut.cancel()
class WinPolicyTests(test_utils.TestCase):
def test_selector_win_policy(self):
async def main():
self.assertIsInstance(
asyncio.get_running_loop(),
asyncio.SelectorEventLoop)
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(
asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
finally:
asyncio.set_event_loop_policy(old_policy)
def test_proactor_win_policy(self):
async def main():
self.assertIsInstance(
asyncio.get_running_loop(),
asyncio.ProactorEventLoop)
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(
asyncio.WindowsProactorEventLoopPolicy())
asyncio.run(main())
finally:
asyncio.set_event_loop_policy(old_policy)
if __name__ == '__main__':
unittest.main()
|
draw_bubbles_py_3.py
|
# Visualization of GTac-Hand offline
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 14:13:15 2020
Draw bubbles offline
@author: Zeyu
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
from matplotlib.animation import FuncAnimation
from matplotlib import style
import numpy as np
from data_gen import data_checkout,raw_data_checkout
import serial
import time
from queue import Queue
import threading
# from celluloid import Camera
from data_collect_fingers import pre_process, FINGER, SECTION
from data_collect_fingers_five import pre_process_five
# def rotate90Clockwise(A):
# N = len(A[0])
# for i in range(N // 2):
# for j in range(i, N - i - 1):
# temp = A[i][j]
# A[i][j] = A[N - 1 - j][i]
# A[N - 1 - j][i] = A[N - 1 - i][N - 1 - j]
# A[N - 1 - i][N - 1 - j] = A[j][N - 1 - i]
# A[j][N - 1 - i] = temp
# return A
mat_x = [4,4,4,4,3,3,3,3,2,2,2,2,1,1,1,1]
mat_y = [1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4]
mat_xy = np.zeros([16,2])
mat_x_0 = [4,4,4,4,3,3,3,3,2,2,2,2,1,1,1,1]
mat_y_0 = [1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4]
mat_amp_index = 10
mat_loc_index = 0.001
mat_sz = np.zeros(16)
mag_dt = np.zeros(3)
data_length = 34
q = Queue(maxsize=34)
def plot_fingertip(ax):
ax.cla()
# plt.scatter(tri_cent_x, tri_cent_y, s=tri_cent_outsider_sz, alpha=0.4)
ax.scatter(mat_x_0, mat_y_0, s=1500, alpha=0.4)
# plt.scatter(mat_x, mat_y, s=500, alpha=1)
ax.scatter(mat_x, mat_y, s=mat_sz, alpha=0.3)
def plot_fingertip_2(scat,mat_x,mat_y,mat_sz):
# ax.scatter(mat_x_0, mat_y_0, s=1500, alpha=0.4)
# ax.scatter(mat_x, mat_y, s=mat_sz, alpha=0.3)
scat.set_offsets(np.array([mat_x,mat_y]).T)
scat.set_sizes(mat_sz)
def animate_quick(i):
start = time.time()
data_frame = data_fingertip.iloc[i]
update_paras_one_block(data_frame)
# tri_cent_x = 2.5 + (mag_dt[0]-tri_x_0)*0.005
# tri_cent_y = 2.5 + (mag_dt[1]-tri_y_0)*0.005
# tri_cent_outsider_sz = 1500+(mag_dt[2]-tri_z_0)*-2
# plot_fingertip(ax1)
plot_fingertip_2(scat1,mat_x,mat_y,mat_sz)
# plot_fingertip(ax2)
# plot_fingertip(ax3)
print('frames {}, time {}'.format(i,round((time.time() - start) * 1000)))
# def update_paras_twofinger(data):
#
# col = data.col
#
# if col in [8,9,10,11]:
# col = col - 8 # to make it 0 1 2 3
# row1 = data.mat5
# row2 = data.mat6
# row3 = data.mat7
# row4 = data.mat8
#
# mat_sz[4*col+3] = row1 * mat_amp_index
# mat_sz[4*col+2] = row2 * mat_amp_index
# mat_sz[4*col+1] = row3 * mat_amp_index
# mat_sz[4*col+0] = row4 * mat_amp_index
#
# mag_dt[0] = data.mag_x6
# mag_dt[1] = data.mag_y6
# mag_dt[2] = data.mag_z6
# tri_x_0 = init_avg.mag_x6
# tri_y_0 = init_avg.mag_y6
#
# # update the matrix location
# mat_x[4*col+3] = 4-col + (mag_dt[0]-tri_x_0)*mat_loc_index
# mat_x[4*col+2] = 4-col + (mag_dt[0]-tri_x_0)*mat_loc_index
# mat_x[4*col+1] = 4-col + (mag_dt[0]-tri_x_0)*mat_loc_index
# mat_x[4*col+0] = 4-col + (mag_dt[0]-tri_x_0)*mat_loc_index
#
# mat_y[4*col+3] = 4 + (mag_dt[1]-tri_y_0)*mat_loc_index
# mat_y[4*col+2] = 3 + (mag_dt[1]-tri_y_0)*mat_loc_index
# mat_y[4*col+1] = 2 + (mag_dt[1]-tri_y_0)*mat_loc_index
# mat_y[4*col+0] = 1 + (mag_dt[1]-tri_y_0)*mat_loc_index
#
# print('data updated')
def update_paras_one_block(assigned_data_frame):
for i in range(16):
mat_sz[i] = int(assigned_data_frame.iloc[i+4] * mat_amp_index)
col = 4 - i // 4
row = i % 4 + 1
mat_x[i] = col + assigned_data_frame.mag_x * mat_loc_index
mat_y[i] = row + assigned_data_frame.mag_y * mat_loc_index
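# A minimal, hedged sketch of what update_paras_one_block() expects: a pandas Series whose
# positions 4..19 hold the 16 taxel readings (they scale the bubble sizes) and which carries
# 'mag_x'/'mag_y' fields that shift the whole 4x4 grid. All labels except mag_x and mag_y are
# made up for this sketch; it is not called anywhere in this script.
def _example_update_one_block():
    labels = ['t_ms', 'finger', 'section', 'frame_no'] + ['mat{}'.format(k) for k in range(1, 17)] + ['mag_x', 'mag_y']
    values = [0, 'INDEX', 'DISTAL', 0] + list(range(16)) + [120, -80]
    frame = pd.Series(values, index=labels)
    update_paras_one_block(frame)
    print(mat_sz)                 # 16 bubble sizes, scaled by mat_amp_index
    print(mat_x[:4], mat_y[:4])   # grid positions shifted by mag_x/mag_y * mat_loc_index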
def setup_scatter_ax(ax):
# rect is the box edge
rect = plt.Rectangle((-1,-1),
5,
5,
ec='none', lw=2, fc='none')
ax.add_patch(rect)
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
scat_base = ax.scatter(mat_x_0, mat_y_0, s=1500, alpha=0.4)
scat = ax.scatter(mat_x, mat_y, s=150, alpha=1)
return scat
# def init():
# update_paras(np.zeros(data_length))
# def read_data(ser):
# while True:
# try:
# data = raw_data_checkout(ser)
# print('data:{}'.format(data)) # print what data read-out
# print('length:{}'.format(len(data)))
# if data:
# print('data:{}'.format(data)) # print what data read-out
# print('length:{}'.format(len(data)))
# if len(data)!=data_length:
# raise Exception('Sorry, wrong data size')
# except:
# print('data reading out is wrong')
#
# # if len(data)==data_length and data[col_index//4] in ['0','1','2','3']: # check if the data follows the right form.
# if len(data)==data_length: # check if the data follows the right form.
# update_paras_twofinger(data)
# def draw_fingertip(fig):
# ani = FuncAnimation(fig, animate_quick, frames=1000, interval=2)
# # plt.tight_layout()
# # ani.save('finger_rolling.gif', writer='pillow')
# plt.show()
def find_init(data_seq, n):
    # average the first n data points of data_seq
    total = data_seq.iloc[0]
    for i in range(1, n):
        total = total + data_seq.iloc[i]
    avg = (total / n).astype('int64')
    return avg
def celluloid_draw(data_seq):
return data_seq
def make_fingertip_video(finger,section):
video_name = 'video/' + finger+ '_' + section + '.mp4'
print('Start to save sensor video: {} of {}'.format(finger,section))
ani = FuncAnimation(fig, animate_quick, frames=len(data_fingertip), interval=120)
# ani.save('finger_rolling.gif', writer='pillow')
ani.save(video_name, writer='ffmpeg')
# plt.show()
print('Saved: {}'.format(video_name))
if __name__ == '__main__':
# try:
# ser = serial.Serial('COM12', 115200,timeout=.01)
# except:
# print('Serial Connection Failed, Will Try Again in 3 SECONDS')
# time.sleep(3)
# else:
# if ser.is_open:
# print('Serial Port Opened:\n',ser)
# ser.flushInput()
# time.sleep(1)
# thread1 = threading.Thread(target=read_data,args=(ser,))
# thread1.start()
print('starting to draw data from sensors')
data_seq = pd.read_csv('data/Achieved/test_five_2000_points_20210518_103354.csv', index_col=0)
# init_avg = find_init(data_seq, 100)
data_assigned = pre_process_five(data_seq)
data_thumb_dis = (data_assigned.loc[(data_assigned.finger == 'THUMB') & (data_assigned.section == 'DISTAL')]).reset_index()
data_index_dis = (data_assigned.loc[(data_assigned.finger == 'INDEX') & (data_assigned.section == 'DISTAL')]).reset_index()
data_thumb_pro = (data_assigned.loc[(data_assigned.finger == 'THUMB') & (data_assigned.section == 'PROXIMAL')]).reset_index()
data_index_pro = (data_assigned.loc[(data_assigned.finger == 'INDEX') & (data_assigned.section == 'PROXIMAL')]).reset_index()
data_thumb_met = (data_assigned.loc[(data_assigned.finger == 'THUMB') & (data_assigned.section == 'METACARPAL')]).reset_index()
data_index_met = (data_assigned.loc[(data_assigned.finger == 'INDEX') & (data_assigned.section == 'METACARPAL')]).reset_index()
for finger in FINGER:
for section in SECTION:
data_fingertip = (data_assigned.loc[(data_assigned.finger == finger) & (data_assigned.section == section)]).reset_index()
fig = plt.figure()
# fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax1 = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(0, 5), ylim=(0, 5))
scat1 = setup_scatter_ax(ax1)
#
# ax2 = fig.add_subplot(312, aspect='equal', autoscale_on=False,
# xlim=(-5, 5), ylim=(-5, 5))
# ax2 = setup_ax(ax2)
#
# ax3 = fig.add_subplot(313, aspect='equal', autoscale_on=False,
# xlim=(-5, 5), ylim=(-5, 5))
# ax3 = setup_ax(ax3)
make_fingertip_video(finger, section)
# data_index = (data_assigned.loc[data_assigned.finger == 'INDEX']).reset_index()
# data_thumb_dis.plot.line(x='milliseconds', y='mag_x')
# fig = plt.figure()
# # fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
# ax1 = fig.add_subplot(111, aspect='equal', autoscale_on=False,
# xlim=(0, 5), ylim=(0, 5))
# scat1 = setup_ax(ax1)
# #
# # ax2 = fig.add_subplot(312, aspect='equal', autoscale_on=False,
# # xlim=(-5, 5), ylim=(-5, 5))
# # ax2 = setup_ax(ax2)
# #
# # ax3 = fig.add_subplot(313, aspect='equal', autoscale_on=False,
# # xlim=(-5, 5), ylim=(-5, 5))
# # ax3 = setup_ax(ax3)
#
# ani = FuncAnimation(fig, animate_quick, frames=150, interval=10)
#
# # ani.save('finger_rolling.gif', writer='pillow')
# ani.save('./video/index_pro.mp4', writer='ffmpeg')
# plt.show()
|
signaler_qt.py
|
#!/usr/bin/env python
# encoding: utf-8
import threading
import time
from PySide import QtCore
import zmq
from zmq.auth.thread import ThreadAuthenticator
from api import SIGNALS
from certificates import get_frontend_certificates
from utils import get_log_handler
logger = get_log_handler(__name__)
class SignalerQt(QtCore.QObject):
"""
Signaling server.
Receives signals from the signaling client and emit Qt signals for the GUI.
"""
PORT = "5667"
BIND_ADDR = "tcp://127.0.0.1:%s" % PORT
def __init__(self):
QtCore.QObject.__init__(self)
        # Note: we use a plain thread instead of a QThread since it works better.
# The signaler was not responding on OSX if the worker loop was run in
# a QThread.
self._worker_thread = threading.Thread(target=self._run)
self._do_work = threading.Event()
def start(self):
"""
Start the worker thread for the signaler server.
"""
self._do_work.set()
self._worker_thread.start()
def _run(self):
"""
Start a loop to process the ZMQ requests from the signaler client.
"""
logger.debug("Running SignalerQt loop")
context = zmq.Context()
socket = context.socket(zmq.REP)
# Start an authenticator for this context.
auth = ThreadAuthenticator(context)
auth.start()
auth.allow('127.0.0.1')
# Tell authenticator to use the certificate in a directory
auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
public, secret = get_frontend_certificates()
socket.curve_publickey = public
socket.curve_secretkey = secret
socket.curve_server = True # must come before bind
socket.bind(self.BIND_ADDR)
while self._do_work.is_set():
# Wait for next request from client
try:
request = socket.recv(zmq.NOBLOCK)
logger.debug("Received request: '{0}'".format(request))
socket.send("OK")
self._process_request(request)
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
raise
time.sleep(0.01)
logger.debug("SignalerQt thread stopped.")
def stop(self):
"""
Stop the SignalerQt blocking loop.
"""
self._do_work.clear()
def _process_request(self, request_json):
"""
Process a request and call the according method with the given
parameters.
:param request_json: a json specification of a request.
:type request_json: str
"""
try:
request = zmq.utils.jsonapi.loads(request_json)
signal = request['signal']
data = request['data']
except Exception as e:
msg = "Malformed JSON data in Signaler request '{0}'. Exc: {1!r}"
msg = msg.format(request_json, e)
logger.critical(msg)
raise
if signal not in SIGNALS:
logger.error("Unknown signal received, '{0}'".format(signal))
return
try:
qt_signal = getattr(self, signal)
except Exception:
logger.warning("Signal not implemented, '{0}'".format(signal))
return
logger.debug("Emitting '{0}'".format(signal))
if data is None:
qt_signal.emit()
else:
qt_signal.emit(data)
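# A minimal, hedged sketch of the request payload _process_request() expects: a JSON object with
# a 'signal' name (which must appear in SIGNALS) and an optional 'data' payload. The signal name
# below is an assumption for illustration; a real client sends this string over a CurveZMQ REQ
# socket to tcp://127.0.0.1:5667 and receives "OK" before the matching Qt signal is emitted.
def _example_request_payload():
    import json
    return json.dumps({'signal': 'srv_version', 'data': '0.9.2'})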
|
plutus.py
|
# Plutus Bitcoin Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
import os
import time
import pickle
import hashlib
import binascii
import multiprocessing
from ellipticcurve.privateKey import PrivateKey
DATABASE = r'database/'
def generate_private_key():
"""
Generate a random 32-byte hex integer which serves as a randomly
generated Bitcoin private key.
Average Time: 0.0000061659 seconds
"""
return binascii.hexlify(os.urandom(32)).decode('utf-8').upper()
def private_key_to_public_key(private_key):
"""
Accept a hex private key and convert it to its respective public key.
Because converting a private key to a public key requires SECP256k1 ECDSA
signing, this function is the most time consuming and is a bottleneck in
the overall speed of the program.
Average Time: 0.0031567731 seconds
"""
pk = PrivateKey().fromString(bytes.fromhex(private_key))
return '04' + pk.publicKey().toString().hex().upper()
def public_key_to_address(public_key):
"""
    Accept a public key and convert it to its respective P2PKH wallet address.
Average Time: 0.0000801390 seconds
"""
output = []
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
var = hashlib.new('ripemd160')
encoding = binascii.unhexlify(public_key.encode())
var.update(hashlib.sha256(encoding).digest())
var_encoded = ('00' + var.hexdigest()).encode()
digest = hashlib.sha256(binascii.unhexlify(var_encoded)).digest()
var_hex = '00' + var.hexdigest() + hashlib.sha256(digest).hexdigest()[0:8]
count = [char != '0' for char in var_hex].index(True) // 2
n = int(var_hex, 16)
while n > 0:
n, remainder = divmod(n, 58)
output.append(alphabet[remainder])
for i in range(count): output.append(alphabet[0])
return ''.join(output[::-1])
def process(private_key, public_key, address, database):
"""
Accept an address and query the database. If the address is found in the
database, then it is assumed to have a balance and the wallet data is
written to the hard drive. If the address is not in the database, then it
is assumed to be empty and printed to the user.
Average Time: 0.0000026941 seconds
"""
if address in database[0] or \
address in database[1] or \
address in database[2] or \
address in database[3]:
with open('plutus.txt', 'a') as file:
file.write('hex private key: ' + str(private_key) + '\n' +
'WIF private key: ' + str(private_key_to_WIF(private_key)) + '\n' +
'public key: ' + str(public_key) + '\n' +
'address: ' + str(address) + '\n\n')
# else:
# print(str(address))
def private_key_to_WIF(private_key):
"""
Convert the hex private key into Wallet Import Format for easier wallet
importing. This function is only called if a wallet with a balance is
found. Because that event is rare, this function is not significant to the
main pipeline of the program and is not timed.
"""
digest = hashlib.sha256(binascii.unhexlify('80' + private_key)).hexdigest()
var = hashlib.sha256(binascii.unhexlify(digest)).hexdigest()
var = binascii.unhexlify('80' + private_key + var[0:8])
alphabet = chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
value = pad = 0
result = ''
for i, c in enumerate(var[::-1]): value += 256**i * c
while value >= len(alphabet):
div, mod = divmod(value, len(alphabet))
result, value = chars[mod] + result, div
result = chars[value] + result
for c in var:
if c == 0: pad += 1
else: break
return chars[0] * pad + result
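# A minimal, hedged sketch tying the helpers above together: derive the public key, the P2PKH
# address and the WIF form for one freshly generated private key (no database lookup involved).
# Not part of the brute-force pipeline; it is never called by main().
def example_single_key():
    private_key = generate_private_key()
    public_key = private_key_to_public_key(private_key)
    address = public_key_to_address(public_key)
    print('hex private key:', private_key)
    print('WIF private key:', private_key_to_WIF(private_key))
    print('public key:     ', public_key)
    print('address:        ', address)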
def main(database):
"""
Create the main pipeline by using an infinite loop to repeatedly call the
functions, while utilizing multiprocessing from __main__. Because all the
functions are relatively fast, it is better to combine them all into
one process.
"""
t0 = time.time()
iter = 0
thr_id = str(multiprocessing.current_process().name).split('-')[1]
while True:
private_key = generate_private_key() # 0.0000061659 seconds
public_key = private_key_to_public_key(private_key) # 0.0031567731 seconds
address = public_key_to_address(public_key) # 0.0000801390 seconds
process(private_key, public_key, address, database) # 0.0000026941 seconds
# --------------------
# 0.0032457721 seconds
iter += 1
if ((time.time() - t0) >= 1):
for i in range(int(thr_id)):
print('\t', end = ' ')
print(thr_id + ':' + str(iter) + '/s', end = '\r')
iter = 0
t0 = time.time()
if __name__ == '__main__':
"""
    Deserialize the database and read it into a list of sets for O(1)
    membership lookups. Initialize the multiprocessing to target the main
function with cpu_count() concurrent processes.
"""
database = [set() for _ in range(4)]
count = len(os.listdir(DATABASE))
half = count // 2
quarter = half // 2
for c, p in enumerate(os.listdir(DATABASE)):
print('\rreading database: ' + str(c + 1) + '/' + str(count), end = ' ')
with open(DATABASE + p, 'rb') as file:
if c < half:
if c < quarter: database[0] = database[0] | pickle.load(file)
else: database[1] = database[1] | pickle.load(file)
else:
if c < half + quarter: database[2] = database[2] | pickle.load(file)
else: database[3] = database[3] | pickle.load(file)
print('DONE')
# To verify the database size, remove the # from the line below
#print('database size: ' + str(sum(len(i) for i in database))); quit()
for cpu in range(multiprocessing.cpu_count()):
multiprocessing.Process(target = main, args = (database, )).start()
|
top.py
|
"""
TODO:
1. Refactor code so it's organized (now that I have an idea of what it'll do)
2. Add more sorting by changing stat dict to something that's easier to sort
"""
import os
import sys
import glob
import pickle
from decimal import Decimal
import time
from pprint import pprint
from os import system, name
from datetime import timedelta, datetime
import threading
import re
from collections import OrderedDict
from operator import getitem
from .color import colors
from .getch import getch, getchb
from .termsize import get_terminal_size
import cbpro
os.environ['TZ'] = 'UTC'
time.tzset()
GETCH_LOCK = threading.Lock()
g_last_input = None
g_filter = ''
g_paused = False
g_show_orders = False
PRICE_CACHE = {}
PRICE_CACHE_RATE = 73.1331  # refresh a coin's cached price after this many seconds
def clear():
if name == 'nt':
system('cls')
else:
system('clear')
def get_current_price(coin):
last_update = time.time()
current_price = Decimal('0.0')
if not coin in PRICE_CACHE:
public_client = cbpro.PublicClient()
ticker = public_client.get_product_ticker(product_id=coin)
current_price = Decimal(ticker['price'])
else:
# check cache age
if time.time() - PRICE_CACHE[coin]['last_update'] > PRICE_CACHE_RATE:
public_client = cbpro.PublicClient()
ticker = public_client.get_product_ticker(product_id=coin)
current_price = Decimal(ticker['price'])
else:
last_update = PRICE_CACHE[coin]['last_update']
current_price = PRICE_CACHE[coin]['price']
PRICE_CACHE[coin] = {'price':current_price, 'last_update':last_update}
return Decimal(current_price)
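# Illustrative usage (editorial sketch): repeated calls within PRICE_CACHE_RATE seconds
# reuse the cached ticker price instead of hitting the cbpro public API again, e.g.
#
#     p1 = get_current_price('BTC-USD')   # network call, result cached
#     p2 = get_current_price('BTC-USD')   # served from PRICE_CACHE until it goes stale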
def get_input():
global g_last_input
global g_paused
global g_filter
global g_show_orders
while 1:
with GETCH_LOCK:
g_last_input = getch()
if g_last_input == 'q':
print('exiting...')
break
elif g_last_input == 'o':
g_show_orders = True
elif g_last_input == 's':
g_show_orders = False
elif g_last_input == 'f':
with GETCH_LOCK:
g_paused = True
buf = 'Enter filter regex (e.g. BT.*): '
start_len = len(buf)
c = ''
fbuf = ''
sys.stdout.write('\n')
sys.stdout.write(buf)
sys.stdout.flush()
skip_next = 0
while c not in ('\n', '\r'):
c = getchb()
# skip escape sequence (arrow keys, etc)
if ord(c) == 27:
skip_next = 1
continue
if skip_next == 1:
skip_next = 2
continue
if skip_next == 2:
skip_next = 0
continue
if ord(c) == 127:
if len(buf) > start_len:
buf = buf[:-1]
sys.stdout.write('\r')
sys.stdout.write(buf+' ')
if fbuf:
fbuf = fbuf[:-1]
elif c == '\n':
break
else:
buf += c
fbuf += c
sys.stdout.write('\r')
sys.stdout.write(buf)
sys.stdout.flush()
g_filter = fbuf.strip()
print('\n')
try:
re.compile(g_filter, flags=re.IGNORECASE)
except:
print('Failed to compile regex: {}'.format(g_filter))
g_filter = ''
g_paused = False
time.sleep(0.001)
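# Note on the escape handling above (editorial sketch): ANSI arrow keys arrive as three
# bytes, ESC '[' and a letter (e.g. '\x1b[A' for up). Seeing ord(c) == 27 therefore sets
# skip_next so the following two bytes are swallowed instead of being appended to the
# filter buffer; only printable input and backspace (127) reach `fbuf`.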
def draw_line(thickness):
if thickness == 1:
print('{}{}{}'.format(colors.fg.darkgrey, u'\u2581'*80, colors.reset))
elif thickness == 2:
print('{}{}{}'.format(colors.fg.darkgrey, u'\u2582'*80, colors.reset))
elif thickness == 3:
print('{}{}{}'.format(colors.fg.darkgrey, u'\u2583'*80, colors.reset))
elif thickness == 4:
print('{}{}{}'.format(colors.fg.darkgrey, u'\u2584'*80, colors.reset))
elif thickness == 5:
print('{}{}{}'.format(colors.fg.darkgrey, u'\u2585'*80, colors.reset))
else:
print('{}{}{}'.format(colors.fg.darkgrey, u'\u2586'*80, colors.reset))
def sec2time(sec):
if hasattr(sec,'__len__'):
return [sec2time(s) for s in sec]
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
pattern = r'%2dh %2dm %2ds'
if h == 0 and m == 0:
return r'%2ds' % (s)
if h == 0 and d == 0:
return r'%2dm %2ds' % (m, s)
if d == 0:
#return pattern % (h, m, s)
return r'%2dh %2dm %2ds' % (h, m, s)
return ('%dd' + pattern) % (d, h, m, s)
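# Example outputs from sec2time (editorial note; the padding comes from the %2d format):
#
#     sec2time(45)     -> '45s'
#     sec2time(90)     -> ' 1m 30s'
#     sec2time(3700)   -> ' 1h  1m 40s'
#     sec2time(90000)  -> '1d 1h  0m  0s'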
def avg(l):
if len(l) < 1:
return Decimal(0.0)
return Decimal(sum(l))/len(l)
def show_orders():
files = glob.glob(sys.argv[1]+'/*.cache')
if not files:
print('ERROR: empty directory')
exit(1)
stats = {}
stats_incomplete = {}
recent = []
cur_time = time.time()
open_times = []
profit_dates = {}
print('[{}q{}]{}uit {}[{}f{}]{}ilter {}[{}s{}]{}tats{:>51} UTC'.format(
colors.fg.red, colors.reset, colors.fg.lightgrey,
colors.reset, colors.fg.pink, colors.reset, colors.fg.lightgrey,
colors.reset, colors.fg.green, colors.reset, colors.fg.lightgrey,
str(datetime.now()).split('.')[0])
)
draw_line(1)
print('Open orders:')
print('{:>15} {:>9} {:>13} {:>13} {:>14} {:>11}'.format(
'Duration', 'Coin', 'Bought', 'Sell-Price', 'Size', 'Diff',
))
w, h = get_terminal_size()
hn = 0
total = 0
for f in files:
data = None
coin = None
with open(f, "rb") as fd:
data = pickle.load(fd)
if not data:
continue
for order_id in data: #.items():
v = data[order_id]
data[order_id]['created_at'] = v['first_status']['created_at']
sorted_data = OrderedDict(sorted(data.items(), key = lambda x: getitem(x[1], 'created_at'), reverse=True))
#sorted_keys = sorted(data.keys(), reverse=True)
for order_id, v in sorted_data.items():
if not v['first_status']:
continue
coin = v['first_status']['product_id']
if g_filter:
if not re.search(g_filter, coin, re.IGNORECASE):
continue
if v['completed'] or not v['sell_order']:
continue
sell = v['sell_order']
created_at = time.mktime(time.strptime(sell['created_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
duration = cur_time - created_at
price = sell['price']
size = sell['size']
bought_price = round(Decimal(v['last_status']['executed_value']) / Decimal(v['last_status']['filled_size']), 4)
if hn+10 < h:
cur_price = get_current_price(sell['product_id'])
#print('{:>17} {:>9} {:>16} {:>16} {:>18}'.format(
print('{:>15} {:>9} {:>13} {:>13} {:>14} {:>11}'.format(
sec2time(duration),
sell['product_id'],
bought_price, price,
size,
round(cur_price - bought_price, 2)
))
hn += 1
if hn > h:
print('{}... truncated ({}/{} displayed) ...{}'.format(colors.fg.red, h, hn, colors.reset))
def top():
clear()
print('')
if g_show_orders:
return show_orders()
files = glob.glob(sys.argv[1]+'/*.cache')
if not files:
print('ERROR: empty directory')
exit(1)
stats = {}
stats_incomplete = {}
recent = []
cur_time = time.time()
open_times = []
profit_dates = {}
w, h = get_terminal_size()
hn = 0
open_percents = []
for f in files:
data = None
coin = None
with open(f, "rb") as fd:
data = pickle.load(fd)
if not data:
continue
for order_id, v in data.items():
if not v['first_status']:
continue
coin = v['first_status']['product_id']
if g_filter:
if not re.search(g_filter, coin, re.IGNORECASE):
continue
if not coin in stats:
stats[coin] = {
'epoch_diffs':[], 'profits':[], 'profits_total':Decimal('0.0'),
'open_orders':0, 'done_orders':0, 'avg_close_time':0.0, 'error_orders':0,
}
first_status = v['first_status']
epoch = time.mktime(time.strptime(first_status['created_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
if v['completed'] and 'sell_order_completed' in v and v['sell_order_completed']:
date_only = v['sell_order_completed']['done_at'].split('T')[0]
if not date_only in profit_dates:
profit_dates[date_only] = []
profit_dates[date_only].append(v['profit_usd'])
end_epoch = time.mktime(time.strptime(v['sell_order_completed']['done_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
epoch_diff = end_epoch - epoch
cur_diff = cur_time - end_epoch
if cur_diff < (86400/12):
recent.append((coin, v))
profit = v['profit_usd']
stats[coin]['epoch_diffs'].append(epoch_diff)
stats[coin]['profits'].append(profit)
stats[coin]['profits_total'] += profit
stats[coin]['done_orders'] += 1
elif v['completed']:
stats[coin]['error_orders'] += 1
else:
cur_price = get_current_price(coin)
try:
cur_perc = (100*(cur_price/Decimal(v['sell_order']['price']))) - Decimal('100.0')
open_percents.append(cur_perc)
except Exception as err:
# I think sometimes the state drifts after a cancel
# and v['sell'] was removed but v['completed'] is not True yet
#print('ERR:', err, v['sell_order'])
pass
start_epoch = time.mktime(time.strptime(v['first_status']['created_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
open_times.append(cur_time - start_epoch)
stats[coin]['open_orders'] += 1
#sorted_keys = sorted(stats.keys())
#sorted_keys = sorted(stats.items(), key=lambda item: item['profits'])
sorted_keys = OrderedDict(sorted(stats.items(), key = lambda x: getitem(x[1], 'profits_total'), reverse=True))
print('[{}q{}]{}uit {}[{}f{}]{}ilter {}[{}o{}]{}rders{:>50} UTC'.format(
colors.fg.red, colors.reset, colors.fg.lightgrey,
colors.reset, colors.fg.pink, colors.reset, colors.fg.lightgrey,
colors.reset, colors.fg.blue, colors.reset, colors.fg.lightgrey,
str(datetime.now()).split('.')[0])
)
draw_line(1)
print('{}{:>8} {}{:>13} {}{:>7} {}{:>7} {}{:>7} {}{:>12} {}{:>19}{}'.format(
colors.fg.lightcyan, 'Coin',
colors.fg.green, 'Profits',
colors.fg.yellow, 'Open',
colors.fg.blue, 'Done',
colors.fg.red, 'Error',
colors.fg.pink, 'Avg-Profit',
colors.fg.orange, 'Avg-Time',
colors.reset,
))
draw_line(1)
total_profits = Decimal('0.0')
total_open_orders = 0
total_done_orders = 0
total_error_orders = 0
agg_epoch = []
agg_profits = []
for key,v in sorted_keys.items():
coin = key
if not re.search(g_filter, coin, re.IGNORECASE):
continue
#v = stats[key]
if hn+10 < h:
print('{}{:>8} {}{:>13} {}{:>7} {}{:>7} {}{:>7} {}{:>12} {}{:>19}{}'.format(
colors.fg.lightcyan, coin,
colors.fg.green, '$'+str(round(sum(v['profits']), 2)),
colors.fg.yellow, v['open_orders'],
colors.fg.blue, v['done_orders'],
colors.fg.red, v['error_orders'],
colors.fg.pink, '$'+str(round(avg(stats[coin]['profits']), 2)),
colors.fg.orange, sec2time(round(avg(v['epoch_diffs']), 2)) if v['epoch_diffs'] else 'None',
colors.reset,
))
hn += 1
#if v['epoch_diffs']:
agg_epoch.append(round(avg(v['epoch_diffs']), 2) if v['epoch_diffs'] else Decimal('0.0'))
agg_profits.append(round(avg(stats[coin]['profits']), 2))
total_open_orders += v['open_orders']
total_done_orders += v['done_orders']
total_error_orders += v['error_orders']
total_profits += round(sum(v['profits']), 2)
if hn+12 < h:
draw_line(1)
print('{}{:>8} {}{:>13} {}{:>7} {}{:>7} {}{:>7} {}{:>12} {}{:>19}{}'.format(
colors.fg.darkgrey, 'all',
colors.fg.green, '$'+str(total_profits),
colors.fg.yellow, total_open_orders,
colors.fg.blue, total_done_orders,
colors.fg.red, total_error_orders,
colors.fg.pink, '$'+str(round(avg(agg_profits), 2)),
colors.fg.orange, sec2time(round(avg(agg_epoch), 2)),
colors.reset,
))
print('')
hn += 3
if hn+10 < h:
draw_line(3)
hn += 1
if open_times:
min_open_time = sec2time(round(min(open_times), 2))
max_open_time = sec2time(round(max(open_times), 2))
avg_open_time = sec2time(round(avg(open_times), 2))
else:
min_open_time = Decimal('0.0')
max_open_time = Decimal('0.0')
avg_open_time = Decimal('0.0')
if hn+12 < h:
print('{}{:>16} {:>16} {:>16} {:>16}'.format(colors.fg.lightgrey, 'Open order times', 'Min', 'Max', 'Avg'))
print('{}{:>16} {:>16} {:>16} {:>16}'.format(colors.fg.lightred, ' ', min_open_time, max_open_time, avg_open_time))
hn += 2
if hn+10 < h:
cur_drift = round(sum(open_percents), 2)
if cur_drift < 0:
print('{}Goal-drift: {}{}%'.format(colors.reset, colors.fg.red, cur_drift))
else:
print('{}Goal-drift: {}{}%'.format(colors.reset, colors.fg.green, cur_drift))
hn+=1
# Last 7 days with profits
#print('{}{:>26}'.format(colors.fg.lightgrey, 'Last 7 days profits'))
sorted_dates_val = OrderedDict(sorted(profit_dates.items(), key = lambda x: x[1], reverse=True))
sorted_dates = sorted(profit_dates.keys(), reverse=True)
x = []
y = []
for key in sorted_dates[:7]:
if not re.search(g_filter, key, re.IGNORECASE):
continue
val = profit_dates[key]
date_total = round(sum(val), 2)
x.append(key)
y.append(date_total)
#print(colors.fg.cyan, ' {} {}{:>15}'.format(key, colors.fg.green, '$'+str(date_total)))
if hn+10 < h:
draw_line(3)
hn += 1
if y:
total_profit = []
max_y = max(y)
width = 50
for i in range(len(y)):
key = x[i]
yy = y[i]
nstars = int((yy/max_y) * width)
if hn+10 < h:
print('{}{}{}{:>11} {}{}'.format(colors.fg.cyan, key, colors.fg.green, '$'+str(yy), colors.fg.darkgrey, '*'*nstars))
hn += 1
total_profit.append(yy)
if hn+10 < h:
nstars = int((avg(total_profit)/max_y) * width)
print('{}{}{}{:>11} {}{}^'.format(colors.fg.cyan, 'Daily Avg ', colors.fg.green, '$'+str(round(avg(total_profit), 2)), colors.fg.darkgrey, ' '*(nstars-1)))
if recent:
if hn+10 < h:
draw_line(3)
print('{}{}'.format(colors.fg.lightgrey, 'Recently completed orders'), colors.fg.blue)
hn += 2
#print('Recently completed orders:', colors.fg.lightblue)
print(' {:>8} {:>11} {:>17} {:>19}'.format('Coin', 'Profit', 'Duration', 'Completed'), colors.fg.green)
# bubble sort, why not
for i in range(len(recent)-1):
for j in range(len(recent)-i-1):
if recent[j][1]['sell_order_completed']['done_at'] < recent[j+1][1]['sell_order_completed']['done_at']:
tmp = recent[j+1]
recent[j+1] = recent[j]
recent[j] = tmp
for coin, v in recent:
if not re.search(g_filter, coin, re.IGNORECASE):
continue
first_status = v['first_status']
epoch = time.mktime(time.strptime(first_status['created_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
end_epoch = time.mktime(time.strptime(v['sell_order_completed']['done_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
epoch_diff = end_epoch - epoch
cur_diff = cur_time - end_epoch
profit = round(v['profit_usd'], 2)
if hn+10 < h:
print(' {:>8} {:>11} {:>17} {:>19}'.format(
coin, '$'+str(profit), sec2time(epoch_diff), str(sec2time(cur_diff))+' ago')
)
hn += 1
print(colors.reset)
def usage():
print('usage: {} <cache-dir>'.format(sys.argv[0]))
exit(1)
def main():
global g_last_input
global g_paused
global g_filter
global g_show_orders
if len(sys.argv) != 2:
usage()
files = glob.glob(sys.argv[1]+'/*.cache')
if not files:
print('ERROR: empty directory')
exit(1)
input_thread = threading.Thread(target=get_input)
input_thread.start()
sleep_time = 1.33
running = True
while running:
while g_paused:
time.sleep(0.1)
with GETCH_LOCK:
if g_last_input == 'q':
running = False
break
top()
if g_filter:
print('{}{}filter: {}{}'.format(colors.bg.blue, colors.fg.lightgrey, g_filter, colors.reset))
if g_show_orders:
sleep_time = 2.11
else:
sleep_time = 1.33
time.sleep(sleep_time)
if __name__ == '__main__':
main()
|
dual_blob_store.py
|
#!/usr/bin/env python
"""A BlobStore proxy that writes to two BlobStores."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import threading
import time
from future.builtins import str
from future.moves import queue
from typing import Callable, Dict, Iterable, Optional, Text, TypeVar
from grr_response_core import config
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.stats import metrics
from grr_response_server import blob_store
from grr_response_server.rdfvalues import objects as rdf_objects
# Maximum queue length, where each queue entry can consist of multiple blobs.
# Thus the number of enqueued blobs can be considerably bigger. This only
# serves as a basic measure to prevent unbounded memory growth.
_SECONDARY_WRITE_QUEUE_MAX_LENGTH = 30
DUAL_BLOB_STORE_LATENCY = metrics.Event(
"dual_blob_store_latency",
fields=[("backend_class", str), ("method", str)],
bins=[0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50])
DUAL_BLOB_STORE_SUCCESS_COUNT = metrics.Counter(
"dual_blob_store_success_count",
fields=[("backend_class", str), ("method", str)])
DUAL_BLOB_STORE_ERROR_COUNT = metrics.Counter(
"dual_blob_store_error_count",
fields=[("backend_class", str), ("method", str)])
DUAL_BLOB_STORE_DISCARD_COUNT = metrics.Counter(
"dual_blob_store_discard_count",
fields=[("backend_class", str), ("method", str)])
def _InstantiateBlobStore(name):
try:
cls = blob_store.REGISTRY[name]
except KeyError:
raise ValueError("No blob store %s found." % name)
return cls()
I = TypeVar("I")
O = TypeVar("O")
def _MeasureFn(bs, fn, arg):
"""Runs fn(arg) and tracks latency and error metrics."""
start_time = time.time()
cls_name = compatibility.GetName(type(bs))
fn_name = compatibility.GetName(fn)
try:
result = fn(arg)
except Exception: # pylint: disable=broad-except
DUAL_BLOB_STORE_ERROR_COUNT.Increment(fields=[cls_name, fn_name])
raise
DUAL_BLOB_STORE_LATENCY.RecordEvent(
time.time() - start_time, fields=[cls_name, fn_name])
DUAL_BLOB_STORE_SUCCESS_COUNT.Increment(fields=[cls_name, fn_name])
return result
def _Enqueue(item_queue, bs, fn, arg):
try:
item_queue.put_nowait((bs, fn, arg))
except queue.Full:
DUAL_BLOB_STORE_DISCARD_COUNT.Increment(
fields=[compatibility.GetName(type(bs)),
compatibility.GetName(fn)])
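# Editorial note: because the write queue is bounded (see
# _SECONDARY_WRITE_QUEUE_MAX_LENGTH above), a slow secondary never blocks the caller.
# Roughly:
#
#     _Enqueue(q, bs, bs.WriteBlobs, blobs)   # put_nowait succeeds -> processed later by
#                                             # the background thread via _MeasureFn
#     # if the queue is already full, the item is dropped and
#     # DUAL_BLOB_STORE_DISCARD_COUNT is incremented instead of raising.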
class DualBlobStore(blob_store.BlobStore):
"""A BlobStore proxy that writes to two BlobStores.
This class is backed by both a primary and secondary BlobStore. Requests to
read and write blobs are immediately processed by the primary, return as soon
as the primary has finished processing, and only raise if the primary raises.
Additionally, blobs are concurrently, non-blockingly written to the secondary
from a background thread. If the secondary processes blobs slower than the
primary, writes are queued and delayed. Writes to the secondary can be
discarded, if the number of queued writes is too high. Writes to the primary
are never discarded or delayed.
"""
def __init__(self,
primary = None,
secondary = None):
"""Instantiates a new DualBlobStore and its primary and secondary BlobStore.
Args:
primary: The class name of the primary blob store implementation
secondary: The class name of the secondary blob store implementation
"""
if primary is None:
primary = config.CONFIG["DualBlobStore.primary_implementation"]
if secondary is None:
secondary = config.CONFIG["DualBlobStore.secondary_implementation"]
precondition.AssertType(primary, Text)
precondition.AssertType(secondary, Text)
self._primary = _InstantiateBlobStore(primary)
self._secondary = _InstantiateBlobStore(secondary)
self._write_queue = queue.Queue(_SECONDARY_WRITE_QUEUE_MAX_LENGTH)
self._read_queue = queue.Queue()
# Signal that can be set to False from tests to stop the background
# processing threads.
self._thread_running = True
self._threads = []
self._StartBackgroundThread("DualBlobStore_WriteThread", self._write_queue)
self._StartBackgroundThread("DualBlobStore_ReadThread", self._read_queue)
def WriteBlobs(self,
blob_id_data_map):
"""Creates or overwrites blobs."""
_Enqueue(self._write_queue, self._secondary, self._secondary.WriteBlobs,
dict(blob_id_data_map))
_MeasureFn(self._primary, self._primary.WriteBlobs, blob_id_data_map)
def ReadBlobs(self, blob_ids
):
"""Reads all blobs, specified by blob_ids, returning their contents."""
_Enqueue(self._read_queue, self._secondary, self._secondary.ReadBlobs,
list(blob_ids))
return _MeasureFn(self._primary, self._primary.ReadBlobs, blob_ids)
def ReadBlob(self, blob_id):
"""Reads the blob contents, identified by the given BlobID."""
_Enqueue(self._read_queue, self._secondary, self._secondary.ReadBlob,
blob_id)
return _MeasureFn(self._primary, self._primary.ReadBlob, blob_id)
def CheckBlobExists(self, blob_id):
"""Checks if a blob with a given BlobID exists."""
_Enqueue(self._read_queue, self._secondary, self._secondary.CheckBlobExists,
blob_id)
return _MeasureFn(self._primary, self._primary.CheckBlobExists, blob_id)
def CheckBlobsExist(self, blob_ids
):
"""Checks if blobs for the given identifiers already exist."""
_Enqueue(self._read_queue, self._secondary, self._secondary.CheckBlobsExist,
list(blob_ids))
return _MeasureFn(self._primary, self._primary.CheckBlobsExist, blob_ids)
def _StartBackgroundThread(self, thread_name, item_queue):
def _ThreadLoop():
while self._thread_running:
bs, fn, arg = item_queue.get()
try:
_MeasureFn(bs, fn, arg)
except Exception as e: # pylint: disable=broad-except
logging.exception(e)
item_queue.task_done()
thread = threading.Thread(target=_ThreadLoop, name=thread_name)
thread.daemon = True
thread.start()
self._threads.append(thread)
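# Minimal usage sketch (editorial, assuming the named implementations are registered in
# blob_store.REGISTRY and configured as described in the class docstring):
#
#     store = DualBlobStore(primary="DbBlobStore", secondary="MirrorBlobStore")
#     store.WriteBlobs(blob_id_data_map)   # returns when the primary has written;
#                                          # the secondary write is queued in background
#     data = store.ReadBlobs(blob_ids)     # served by the primary, mirrored to secondary
#
# The class names above are placeholders; any names present in blob_store.REGISTRY work.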
|
run_engine.py
|
import asyncio
from datetime import datetime
import time as ttime
import sys
import logging
from warnings import warn
from inspect import Parameter, Signature
from itertools import count, tee
from collections import deque, defaultdict, ChainMap
from enum import Enum
import functools
import inspect
from contextlib import ExitStack
import threading
import weakref
import concurrent
from event_model import DocumentNames, schema_validators
from super_state_machine.machines import StateMachine
from super_state_machine.extras import PropertyMachine
from super_state_machine.errors import TransitionError
try:
from asyncio import current_task
except ImportError:
    # handle py < 3.7
from asyncio.tasks import Task
current_task = Task.current_task
del Task
from .utils import (CallbackRegistry, SigintHandler, normalize_subs_input,
AsyncInput, new_uid, NoReplayAllowed,
RequestAbort, RequestStop, RunEngineInterrupted,
IllegalMessageSequence, FailedPause, FailedStatus,
InvalidCommand, PlanHalt, Msg, ensure_generator,
single_gen, short_uid, default_during_task)
class _RunEnginePanic(Exception):
...
class RunEngineStateMachine(StateMachine):
"""
Attributes
----------
is_idle
State machine is in its idle state
is_running
State machine is in its running state
is_paused
State machine is paused.
"""
class States(Enum):
"""state.name = state.value"""
IDLE = 'idle'
RUNNING = 'running'
PAUSING = 'pausing'
PAUSED = 'paused'
HALTING = 'halting'
STOPPING = 'stopping'
ABORTING = 'aborting'
SUSPENDING = 'suspending'
PANICKED = 'panicked'
@classmethod
def states(cls):
return [state.value for state in cls]
class Meta:
allow_empty = False
initial_state = 'idle'
transitions = {
            # Note that 'transitions' and 'named_transitions' use opposite
            # from/to orderings.
# from_state : [valid_to_states]
'idle': ['running', 'panicked'],
'running': ['idle', 'pausing', 'halting', 'stopping',
'aborting', 'suspending', 'panicked'],
'pausing': ['paused', 'idle', 'halting', 'aborting', 'panicked'],
'suspending': ['running', 'halting', 'aborting', 'panicked'],
'paused': ['idle', 'running', 'halting', 'stopping', 'aborting',
'panicked'],
'halting': ['idle', 'panicked'],
'stopping': ['idle', 'panicked'],
'aborting': ['idle', 'panicked'],
'panicked': []
}
named_checkers = [
('can_pause', 'paused'),
]
class LoggingPropertyMachine(PropertyMachine):
"""expects object to have a `log` attribute
and a `state_hook` attribute that is ``None`` or a callable with signature
``f(value, old_value)``"""
def __init__(self, machine_type):
super().__init__(machine_type)
def __set__(self, obj, value):
own = type(obj)
old_value = self.__get__(obj, own)
with obj._state_lock:
super().__set__(obj, value)
value = self.__get__(obj, own)
obj.log.info("Change state on %r from %r -> %r",
obj, old_value, value)
if obj.state_hook is not None:
obj.state_hook(value, old_value)
def __get__(self, instance, owner):
if instance is None:
return super().__get__(instance, owner)
with instance._state_lock:
return super().__get__(instance, owner)
# See RunEngine.__call__.
_call_sig = Signature(
[Parameter('self', Parameter.POSITIONAL_ONLY),
Parameter('plan', Parameter.POSITIONAL_ONLY),
Parameter('subs', Parameter.POSITIONAL_ONLY, default=None),
Parameter('metadata_kw', Parameter.VAR_KEYWORD)])
def default_scan_id_source(md):
return md.get('scan_id', 0) + 1
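# Editorial sketch: the RunEngine accepts any callable with this signature via its
# ``scan_id_source`` parameter (see the class docstring below). A hypothetical variant
# that derives the id from the clock instead of incrementing the stored value:
#
#     def timestamped_scan_id(md):                       # hypothetical helper
#         return int(ttime.time())
#
#     RE = RunEngine(scan_id_source=timestamped_scan_id)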
def _state_locked(func):
@functools.wraps(func)
def inner(self, *args, **kwargs):
with self._state_lock:
return func(self, *args, **kwargs)
return inner
class RunEngine:
"""The Run Engine execute messages and emits Documents.
Parameters
----------
md : dict-like, optional
The default is a standard Python dictionary, but fancier
objects can be used to store long-term history and persist
it between sessions. The standard configuration
instantiates a Run Engine with historydict.HistoryDict, a
simple interface to a sqlite file. Any object supporting
`__getitem__`, `__setitem__`, and `clear` will work.
loop : asyncio event loop
e.g., ``asyncio.get_event_loop()`` or ``asyncio.new_event_loop()``
preprocessors : list, optional
Generator functions that take in a plan (generator instance) and
modify its messages on the way out. Suitable examples include
the functions in the module ``bluesky.plans`` with names ending in
'wrapper'. Functions are composed in order: the preprocessors
``[f, g]`` are applied like ``f(g(plan))``.
context_managers : list, optional
Context managers that will be entered when we run a plan. The context
managers will be composed in order, much like the preprocessors. If
this argument is omitted, we will use a user-oriented handler for
SIGINT. The elements of this list will be passed this ``RunEngine``
instance as their only argument. You may pass an empty list if you
would like a ``RunEngine`` with no signal handling and no context
managers.
md_validator : callable, optional
a function that raises and prevents starting a run if it deems
the metadata to be invalid or incomplete
Expected signature: f(md)
Function should raise if md is invalid. What that means is
completely up to the user. The function's return value is
ignored.
scan_id_source : callable, optional
a function that will be used to calculate scan_id. Default is to
increment scan_id by 1 each time. However you could pass in a
customized function to get a scan_id from any source.
Expected signature: f(md)
Expected return: updated scan_id value
during_task : callable, optional
Function to be run to block the main thread during `RE.__call__`
The required signature is ::
def blocking_func(ev: Threading.Event) -> None:
"Returns when ev is set"
The default value handles the cases of:
- Matplotlib is not imported (just wait on the event)
- Matplotlib is imported, but not using a Qt, notebook or ipympl
backend (just wait on the event)
- Matplotlib is imported and using a Qt backend (run the Qt app
on the main thread until the run finishes)
- Matplotlib is imported and using a nbagg or ipympl backend (
wait on the event and poll to push updates to the browser)
Attributes
----------
md
Direct access to the dict-like persistent storage described above
record_interruptions
False by default. Set to True to generate an extra event stream
that records any interruptions (pauses, suspensions).
state
{'idle', 'running', 'paused'}
suspenders
Read-only collection of `bluesky.suspenders.SuspenderBase` objects
which can suspend and resume execution; see related methods.
preprocessors : list
Generator functions that take in a plan (generator instance) and
modify its messages on the way out. Suitable examples include
the functions in the module ``bluesky.plans`` with names ending in
'wrapper'. Functions are composed in order: the preprocessors
``[f, g]`` are applied like ``f(g(plan))``.
msg_hook
Callable that receives all messages before they are processed
(useful for logging or other development purposes); expected
signature is ``f(msg)`` where ``msg`` is a ``bluesky.Msg``, a
kind of namedtuple; default is None.
state_hook
Callable with signature ``f(new_state, old_state)`` that will be
called whenever the RunEngine's state attribute is updated; default
is None
waiting_hook
Callable with signature ``f(status_object)`` that will be called
whenever the RunEngine is waiting for long-running commands
(trigger, set, kickoff, complete) to complete. This hook is useful to
incorporate a progress bar.
ignore_callback_exceptions
Boolean, False by default.
loop : asyncio event loop
e.g., ``asyncio.get_event_loop()`` or ``asyncio.new_event_loop()``
max_depth
Maximum stack depth; set this to prevent users from calling the
RunEngine inside a function (which can result in unexpected
behavior and breaks introspection tools). Default is None.
For built-in Python interpreter, set to 2. For IPython, set to 11
(tested on IPython 5.1.0; other versions may vary).
pause_msg : str
The message printed when a run is interrupted. This message
includes instructions of changing the state of the RunEngine.
It is set to ``bluesky.run_engine.PAUSE_MSG`` by default and
can be modified based on needs.
commands:
The list of commands available to Msg.
"""
_state = LoggingPropertyMachine(RunEngineStateMachine)
_UNCACHEABLE_COMMANDS = ['pause', 'subscribe', 'unsubscribe', 'stage',
'unstage', 'monitor', 'unmonitor', 'open_run',
'close_run', 'install_suspender',
'remove_suspender']
@property
def state(self):
return self._state
def __init__(self, md=None, *, loop=None, preprocessors=None,
context_managers=None, md_validator=None,
scan_id_source=default_scan_id_source,
during_task=default_during_task):
if loop is None:
loop = get_bluesky_event_loop()
self._th = _ensure_event_loop_running(loop)
self._state_lock = threading.RLock()
self._loop = loop
self._during_task = during_task
# When set, RunEngine.__call__ should stop blocking.
self._blocking_event = threading.Event()
# When cleared, RunEngine._run will pause until set.
self._run_permit = asyncio.Event(loop=loop)
self.loop.call_soon_threadsafe(self._run_permit.set)
# Make a logger for this specific RE instance, using the instance's
# Python id, to keep from mixing output from separate instances.
logger_name = "bluesky.RE.{id}".format(id=id(self))
self.log = logging.getLogger(logger_name)
if md is None:
md = {}
self.md = md
self.md.setdefault('versions', {})
try:
import ophyd
self.md['versions']['ophyd'] = ophyd.__version__
except ImportError:
self.log.debug("Failed to import ophyd.")
from ._version import get_versions
self.md['versions']['bluesky'] = get_versions()['version']
del get_versions
if preprocessors is None:
preprocessors = []
self.preprocessors = preprocessors
if context_managers is None:
context_managers = [SigintHandler]
self.context_managers = context_managers
if md_validator is None:
md_validator = _default_md_validator
self.md_validator = md_validator
self.scan_id_source = scan_id_source
self.max_depth = None
self.msg_hook = None
self.state_hook = None
self.waiting_hook = None
self.record_interruptions = False
self.pause_msg = PAUSE_MSG
# The RunEngine keeps track of a *lot* of state.
# All flags and caches are defined here with a comment. Good luck.
self._metadata_per_call = {} # for all runs generated by one __call__
self._bundling = False # if we are in the middle of bundling readings
self._bundle_name = None # name given to event descriptor
self._deferred_pause_requested = False # pause at next 'checkpoint'
self._exception = None # stored and then raised in the _run loop
self._interrupted = False # True if paused, aborted, or failed
self._objs_read = deque() # objects read in one Event
self._read_cache = deque() # cache of obj.read() in one Event
self._asset_docs_cache = deque() # cache of obj.collect_asset_docs()
self._staged = set() # objects staged, not yet unstaged
self._objs_seen = set() # all objects seen
self._movable_objs_touched = set() # objects we moved at any point
self._uncollected = set() # objects after kickoff(), before collect()
self._run_start_uid = None # uid of currently-open run
self._run_start_uids = list() # run start uids generated by __call__
self._interruptions_desc_uid = None # uid for a special Event Desc.
self._interruptions_counter = count(1) # seq_num, special Event stream
self._describe_cache = dict() # cache of all obj.describe() output
self._config_desc_cache = dict() # " obj.describe_configuration()
self._config_values_cache = dict() # " obj.read_configuration() values
self._config_ts_cache = dict() # " obj.read_configuration() timestamps
self._descriptors = dict() # cache of {name: (objs_frozen_set, doc)}
self._monitor_params = dict() # cache of {obj: (cb, kwargs)}
self._sequence_counters = dict() # a seq_num counter per stream
self._teed_sequence_counters = dict() # for if we redo data-points
self._suspenders = set() # set holding suspenders
self._groups = defaultdict(set) # sets of Events to wait for
self._status_objs = defaultdict(set) # status objects to wait for
self._temp_callback_ids = set() # ids from CallbackRegistry
self._msg_cache = deque() # history of processed msgs for rewinding
self._rewindable_flag = True # if the RE is allowed to replay msgs
self._plan_stack = deque() # stack of generators to work off of
self._response_stack = deque() # resps to send into the plans
self._exit_status = 'success' # optimistic default
self._reason = '' # reason for abort
self._task = None # asyncio.Task associated with call to self._run
self._task_fut = None # future proxy to the task above
self._status_tasks = deque() # from self._status_object_completed
self._pardon_failures = None # will hold an asyncio.Event
self._plan = None # the plan instance from __call__
self._command_registry = {
'create': self._create,
'save': self._save,
'drop': self._drop,
'read': self._read,
'monitor': self._monitor,
'unmonitor': self._unmonitor,
'null': self._null,
'stop': self._stop,
'set': self._set,
'trigger': self._trigger,
'sleep': self._sleep,
'wait': self._wait,
'checkpoint': self._checkpoint,
'clear_checkpoint': self._clear_checkpoint,
'rewindable': self._rewindable,
'pause': self._pause,
'resume': self._resume,
'collect': self._collect,
'kickoff': self._kickoff,
'complete': self._complete,
'configure': self._configure,
'stage': self._stage,
'unstage': self._unstage,
'subscribe': self._subscribe,
'unsubscribe': self._unsubscribe,
'open_run': self._open_run,
'close_run': self._close_run,
'wait_for': self._wait_for,
'input': self._input,
'install_suspender': self._install_suspender,
'remove_suspender': self._remove_suspender, }
# public dispatcher for callbacks
# The Dispatcher's public methods are exposed through the
# RunEngine for user convenience.
self.dispatcher = Dispatcher()
self.ignore_callback_exceptions = False
# aliases for back-compatibility
self.subscribe_lossless = self.dispatcher.subscribe
self.unsubscribe_lossless = self.dispatcher.unsubscribe
self._subscribe_lossless = self.dispatcher.subscribe
self._unsubscribe_lossless = self.dispatcher.unsubscribe
@property
def commands(self):
'''
The list of commands available to Msg.
See Also
--------
:meth:`RunEngine.register_command`
:meth:`RunEngine.unregister_command`
:meth:`RunEngine.print_command_registry`
Examples
--------
>>> from bluesky import RunEngine
>>> RE = RunEngine()
>>> # to list commands
>>> RE.commands
'''
# return as a list, not lazy loader, no surprises...
return list(self._command_registry.keys())
def print_command_registry(self, verbose=False):
'''
This conveniently prints the command registry of available
commands.
Parameters
----------
        verbose : bool, optional
verbose print. Default is False
See Also
--------
:meth:`RunEngine.register_command`
:meth:`RunEngine.unregister_command`
:attr:`RunEngine.commands`
Examples
--------
>>> from bluesky import RunEngine
>>> RE = RunEngine()
>>> # Print a very verbose list of currently registered commands
>>> RE.print_command_registry(verbose=True)
'''
commands = "List of available commands\n"
for command, func in self._command_registry.items():
docstring = func.__doc__
if not verbose:
docstring = docstring.split("\n")[0]
commands = commands + "{} : {}\n".format(command, docstring)
return commands
def subscribe(self, func, name='all'):
"""
Register a callback function to consume documents.
.. versionchanged :: 0.10.0
The order of the arguments was swapped and the ``name``
argument has been given a default value, ``'all'``. Because the
meaning of the arguments is unambiguous (they must be a callable
and a string, respectively) the old order will be supported
indefinitely, with a warning.
Parameters
----------
func: callable
expecting signature like ``f(name, document)``
where name is a string and document is a dict
name : {'all', 'start', 'descriptor', 'event', 'stop'}, optional
the type of document this function should receive ('all' by
default)
Returns
-------
token : int
an integer ID that can be used to unsubscribe
See Also
--------
:meth:`RunEngine.unsubscribe`
"""
# pass through to the Dispatcher, spelled out verbosely here to make
# sphinx happy -- tricks with __doc__ aren't enough to fool it
return self.dispatcher.subscribe(func, name)
def unsubscribe(self, token):
"""
        Unregister a callback function by its integer ID.
Parameters
----------
token : int
the integer ID issued by :meth:`RunEngine.subscribe`
See Also
--------
:meth:`RunEngine.subscribe`
"""
# pass through to the Dispatcher, spelled out verbosely here to make
# sphinx happy -- tricks with __doc__ aren't enough to fool it
return self.dispatcher.unsubscribe(token)
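    # Editorial sketch of the subscribe/unsubscribe round trip described above
    # (illustrative only; ``print_doc`` is a hypothetical callback):
    #
    #     def print_doc(name, doc):
    #         print(name, doc.get('uid', ''))
    #
    #     token = RE.subscribe(print_doc)        # receives every document ('all')
    #     ...
    #     RE.unsubscribe(token)                  # stop receiving documents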
@property
def rewindable(self):
return self._rewindable_flag
@rewindable.setter
def rewindable(self, v):
cur_state = self._rewindable_flag
self._rewindable_flag = bool(v)
if self.resumable and self._rewindable_flag != cur_state:
self._reset_checkpoint_state()
@property
def loop(self):
return self._loop
@property
def suspenders(self):
return tuple(self._suspenders)
@property
def verbose(self):
return not self.log.disabled
@verbose.setter
def verbose(self, value):
self.log.disabled = not value
@property
def _run_is_open(self):
return self._run_start_uid is not None
def _clear_run_cache(self):
"Clean up for a new run."
self._run_start_uid = None
self._bundling = False
self._objs_read.clear()
self._read_cache.clear()
self._asset_docs_cache.clear()
self._uncollected.clear()
self._describe_cache.clear()
self._config_desc_cache.clear()
self._config_values_cache.clear()
self._config_ts_cache.clear()
self._descriptors.clear()
self._sequence_counters.clear()
self._teed_sequence_counters.clear()
self._groups.clear()
self._status_objs.clear()
self._interruptions_desc_uid = None
self._interruptions_counter = count(1)
@_state_locked
def _clear_call_cache(self):
"Clean up for a new __call__ (which may encompass multiple runs)."
self._metadata_per_call.clear()
self._staged.clear()
self._objs_seen.clear()
self._movable_objs_touched.clear()
self._deferred_pause_requested = False
self._plan_stack = deque()
self._msg_cache = deque()
self._response_stack = deque()
self._exception = None
self._run_start_uids.clear()
self._exit_status = 'success'
self._reason = ''
self._task = None
self._task_fut = None
self._status_tasks.clear()
self._pardon_failures = asyncio.Event(loop=self.loop)
self._plan = None
self._interrupted = False
# Unsubscribe for per-run callbacks.
for cid in self._temp_callback_ids:
self.unsubscribe(cid)
self._temp_callback_ids.clear()
def reset(self):
"""
Clean up caches and unsubscribe subscriptions.
Lossless subscriptions are not unsubscribed.
"""
if self._state != 'idle':
self.halt()
self._clear_run_cache()
self._clear_call_cache()
self.dispatcher.unsubscribe_all()
@property
def resumable(self):
"i.e., can the plan in progress by rewound"
return self._msg_cache is not None
@property
def ignore_callback_exceptions(self):
return self.dispatcher.ignore_exceptions
@ignore_callback_exceptions.setter
def ignore_callback_exceptions(self, val):
self.dispatcher.ignore_exceptions = val
def register_command(self, name, func):
"""
Register a new Message command.
Parameters
----------
name : str
func : callable
This can be a function or a method. The signature is `f(msg)`.
See Also
--------
:meth:`RunEngine.unregister_command`
:meth:`RunEngine.print_command_registry`
:attr:`RunEngine.commands`
"""
self._command_registry[name] = func
def unregister_command(self, name):
"""
Unregister a Message command.
Parameters
----------
name : str
See Also
--------
:meth:`RunEngine.register_command`
:meth:`RunEngine.print_command_registry`
:attr:`RunEngine.commands`
"""
del self._command_registry[name]
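    # Editorial sketch: registering a custom command handler. The built-in handlers
    # below are coroutines, so a hypothetical 'beep' command could look like:
    #
    #     async def _beep(msg):
    #         print('beep:', msg)
    #
    #     RE.register_command('beep', _beep)
    #     # ... a plan may now yield Msg('beep', None)
    #     RE.unregister_command('beep')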
def request_pause(self, defer=False):
"""
Command the Run Engine to pause.
This function is called by 'pause' Messages. It can also be called
by other threads. It cannot be called on the main thread during a run,
but it is called by SIGINT (i.e., Ctrl+C).
        If the current run has no checkpoint (via the 'clear_checkpoint'
message), this will cause the run to abort.
Parameters
----------
defer : bool, optional
If False, pause immediately before processing any new messages.
If True, pause at the next checkpoint.
False by default.
"""
future = asyncio.run_coroutine_threadsafe(
self._request_pause_coro(defer),
loop=self.loop)
# TODO add a timeout here?
return future.result()
async def _request_pause_coro(self, defer=False):
# We are pausing. Cancel any deferred pause previously requested.
if defer:
self._deferred_pause_requested = True
print("Deferred pause acknowledged. Continuing to checkpoint.")
return
print("Pausing...")
self._deferred_pause_requested = False
self._interrupted = True
self._state = 'pausing'
self._record_interruption('pause')
self._task.cancel()
def _record_interruption(self, content):
"""
Emit an event in the 'interruptions' event stream.
If we are not inside a run or if self.record_interruptions is False,
nothing is done.
"""
if self._interruptions_desc_uid is not None:
# We are inside a run and self.record_interruptions is True.
doc = dict(descriptor=self._interruptions_desc_uid,
time=ttime.time(), uid=new_uid(),
seq_num=next(self._interruptions_counter),
data={'interruption': content},
timestamps={'interruption': ttime.time()})
schema_validators[DocumentNames.event].validate(doc)
self.dispatcher.process(DocumentNames.event, doc)
def __call__(self, *args, **metadata_kw):
"""Execute a plan.
Any keyword arguments will be interpreted as metadata and recorded with
any run(s) created by executing the plan. Notice that the plan
(required) and extra subscriptions (optional) must be given as
positional arguments.
Parameters
----------
plan : generator (positional only)
            a generator that yields ``Msg`` objects (or an iterable that
returns such a generator)
subs : callable, list, or dict, optional (positional only)
Temporary subscriptions (a.k.a. callbacks) to be used on this run.
For convenience, any of the following are accepted:
* a callable, which will be subscribed to 'all'
* a list of callables, which again will be subscribed to 'all'
* a dictionary, mapping specific subscriptions to callables or
lists of callables; valid keys are {'all', 'start', 'stop',
'event', 'descriptor'}
Returns
-------
uids : list
list of uids (i.e. RunStart Document uids) of run(s)
"""
if self.state == 'panicked':
raise RuntimeError("The RunEngine is panicked and "
"cannot be recovered. "
"You must restart bluesky.")
# This scheme lets us make 'plan' and 'subs' POSITIONAL ONLY, reserving
        # all keyword arguments for user metadata.
arguments = _call_sig.bind(self, *args, **metadata_kw).arguments
plan = arguments['plan']
subs = arguments.get('subs', None)
metadata_kw = arguments.get('metadata_kw', {})
if 'raise_if_interrupted' in metadata_kw:
warn("The 'raise_if_interrupted' flag has been removed. The "
"RunEngine now always raises RunEngineInterrupted if it is "
"interrupted. The 'raise_if_interrupted' keyword argument, "
"like all keyword arguments, will be interpreted as "
"metadata.")
# Check that the RE is not being called from inside a function.
if self.max_depth is not None:
frame = inspect.currentframe()
depth = len(inspect.getouterframes(frame))
if depth > self.max_depth:
text = MAX_DEPTH_EXCEEDED_ERR_MSG.format(self.max_depth, depth)
raise RuntimeError(text)
# If we are in the wrong state, raise.
if not self._state.is_idle:
raise RuntimeError("The RunEngine is in a %s state" % self._state)
futs = []
tripped_justifications = []
for sup in self.suspenders:
f_lst, justification = sup.get_futures()
if f_lst:
futs.extend(f_lst)
tripped_justifications.append(justification)
if tripped_justifications:
print("At least one suspender has tripped. The plan will begin "
"when all suspenders are ready. Justification:")
for i, justification in enumerate(tripped_justifications):
print(' {}. {}'.format(i + 1, justification))
print()
print("Suspending... To get to the prompt, "
"hit Ctrl-C twice to pause.")
self._clear_call_cache()
self._clear_run_cache() # paranoia, in case of previous bad exit
for name, funcs in normalize_subs_input(subs).items():
for func in funcs:
self._temp_callback_ids.add(self.subscribe(func, name))
self._plan = plan # this ref is just used for metadata introspection
self._metadata_per_call.update(metadata_kw)
gen = ensure_generator(plan)
for wrapper_func in self.preprocessors:
gen = wrapper_func(gen)
self._plan_stack.append(gen)
self._response_stack.append(None)
if futs:
self._plan_stack.append(single_gen(Msg('wait_for', None, futs)))
self._response_stack.append(None)
self.log.info("Executing plan %r", self._plan)
def _build_task():
# make sure _run will block at the top
self._run_permit.clear()
self._blocking_event.clear()
self._task_fut = asyncio.run_coroutine_threadsafe(self._run(),
loop=self.loop)
def set_blocking_event(future):
self._blocking_event.set()
self._task_fut.add_done_callback(set_blocking_event)
self._resume_task(init_func=_build_task)
if self._interrupted:
raise RunEngineInterrupted(self.pause_msg) from None
return tuple(self._run_start_uids)
__call__.__signature__ = _call_sig
def resume(self):
"""Resume a paused plan from the last checkpoint.
Returns
-------
uids : list
list of Header uids (a.k.a RunStart uids) of run(s)
"""
if self.state == 'panicked':
raise RuntimeError("The RunEngine is panicked and "
"cannot be recovered. "
"You must restart bluesky.")
# The state machine does not capture the whole picture.
if not self._state.is_paused:
raise TransitionError("The RunEngine is the {0} state. "
"You can only resume for the paused state."
"".format(self._state))
self._interrupted = False
self._record_interruption('resume')
new_plan = self._rewind()
self._plan_stack.append(new_plan)
self._response_stack.append(None)
# Re-instate monitoring callbacks.
for obj, (cb, kwargs) in self._monitor_params.items():
obj.subscribe(cb, **kwargs)
# Notify Devices of the resume in case they want to clean up.
for obj in self._objs_seen:
if hasattr(obj, 'resume'):
obj.resume()
self._resume_task()
if self._interrupted:
raise RunEngineInterrupted(self.pause_msg) from None
return tuple(self._run_start_uids)
def _rewind(self):
'''Clean up in preparation for resuming from a pause or suspension.
Returns
-------
new_plan : generator
A new plan made from the messages in the message cache
'''
len_msg_cache = len(self._msg_cache)
new_plan = ensure_generator(list(self._msg_cache))
self._msg_cache = deque()
if len_msg_cache:
self._sequence_counters.clear()
self._sequence_counters.update(self._teed_sequence_counters)
# This is needed to 'cancel' an open bundling (e.g. create) if
# the pause happens after a 'checkpoint', after a 'create', but
# before the paired 'save'.
self._bundling = False
return new_plan
def _resume_task(self, *, init_func=None):
# Clear the blocking Event so that we can wait on it below.
# The task will set it when it is done, as it was previously
        # configured to do in __call__.
self._blocking_event.clear()
# Handle all context managers
with ExitStack() as stack:
for mgr in self.context_managers:
stack.enter_context(mgr(self))
if init_func is not None:
init_func()
if self._task_fut is None or self._task_fut.done():
return
            # The _run task is waiting on this Event. Let it continue.
self.loop.call_soon_threadsafe(self._run_permit.set)
try:
# Block until plan is complete or exception is raised.
try:
self._during_task(self._blocking_event)
except KeyboardInterrupt:
import ctypes
self._interrupted = True
# we can not interrupt a python thread from the outside
# but there is an API to schedule an exception to be raised
# the next time that thread would interpret byte code.
# The documentation of this function includes the sentence
#
# To prevent naive misuse, you must write your
# own C extension to call this.
#
# Here we cheat a bit and use ctypes.
num_threads = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_ulong(self._th.ident),
ctypes.py_object(_RunEnginePanic))
# however, if the thread is in a system call (such
# as sleep or I/O) there is no way to interrupt it
# (per decree of Guido) thus we give it a second
                    # to sort itself out
task_finished = self._blocking_event.wait(1)
# before giving up and putting the RE in a
# non-recoverable panicked state.
if not task_finished or num_threads != 1:
self._state = 'panicked'
except Exception as raised_er:
self.halt()
self._interrupted = True
raise raised_er
finally:
if self._task_fut.done():
# get exceptions from the main task
try:
exc = self._task_fut.exception()
except (asyncio.CancelledError,
concurrent.futures.CancelledError):
exc = None
# if the main task exception is not None, re-raise
# it (unless it is a canceled error)
if (exc is not None
and not isinstance(exc, _RunEnginePanic)):
raise exc
def install_suspender(self, suspender):
"""
Install a 'suspender', which can suspend and resume execution.
Parameters
----------
suspender : `bluesky.suspenders.SuspenderBase`
See Also
--------
:meth:`RunEngine.remove_suspender`
:meth:`RunEngine.clear_suspenders`
"""
self._suspenders.add(suspender)
suspender.install(self)
@asyncio.coroutine
def _install_suspender(self, msg):
"""
See :meth: `RunEngine.install_suspender`
Expected message object is:
Msg('install_suspender', None, suspender)
"""
suspender = msg.args[0]
self.install_suspender(suspender)
def remove_suspender(self, suspender):
"""
Uninstall a suspender.
Parameters
----------
suspender : `bluesky.suspenders.SuspenderBase`
See Also
--------
:meth:`RunEngine.install_suspender`
:meth:`RunEngine.clear_suspenders`
"""
if suspender in self._suspenders:
suspender.remove()
self._suspenders.discard(suspender)
async def _remove_suspender(self, msg):
"""
See :meth: `RunEngine.remove_suspender`
Expected message object is:
Msg('remove_suspender', None, suspender)
"""
suspender = msg.args[0]
self.remove_suspender(suspender)
def clear_suspenders(self):
"""
Uninstall all suspenders.
See Also
--------
:meth:`RunEngine.install_suspender`
:meth:`RunEngine.remove_suspender`
"""
for sus in self.suspenders:
self.remove_suspender(sus)
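    # Editorial sketch of the suspender workflow documented above, assuming a suspender
    # class such as ``bluesky.suspenders.SuspendFloor`` and a ``beam_current`` signal
    # (both hypothetical here):
    #
    #     sus = SuspendFloor(beam_current, 2, resume_thresh=3)
    #     RE.install_suspender(sus)      # runs now pause/resume around beam dumps
    #     ...
    #     RE.remove_suspender(sus)       # or RE.clear_suspenders()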
def request_suspend(self, fut, *, pre_plan=None, post_plan=None,
justification=None):
"""Request that the run suspend itself until the future is finished.
The two plans will be run before and after waiting for the future.
        This enables doing things like opening and closing shutters and
resetting cameras around a suspend.
Parameters
----------
fut : asyncio.Future
pre_plan : iterable or callable, optional
Plan to execute just before suspending. If callable, must
take no arguments.
post_plan : iterable or callable, optional
Plan to execute just before resuming. If callable, must
take no arguments.
justification : str, optional
explanation of why the suspension has been requested
"""
print("Suspending....To get prompt hit Ctrl-C twice to pause.")
ts = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print("Suspension occurred at {}.".format(ts))
async def _request_suspend(pre_plan, post_plan, justification):
if not self.resumable:
print("No checkpoint; cannot suspend.")
print("Aborting: running cleanup and marking "
"exit_status as 'abort'...")
self._interrupted = True
with self._state_lock:
self._exception = FailedPause()
was_paused = self._state == 'paused'
self._state = 'aborting'
if not was_paused:
self._task.cancel()
if justification is not None:
print("Justification for this suspension:\n%s" % justification)
self._record_interruption('suspend')
            # Make a copy (via list) so the monitors can be re-instated later.
for obj, (cb, kwargs) in list(self._monitor_params.items()):
obj.clear_sub(cb)
# During suspend, all motors should be stopped. Call stop() on
# every object we ever set().
self._stop_movable_objects(success=True)
# Notify Devices of the pause in case they want to clean up.
for obj in self._objs_seen:
if hasattr(obj, 'pause'):
try:
obj.pause()
except NoReplayAllowed:
self._reset_checkpoint_state_meth()
# rewind to the last checkpoint
new_plan = self._rewind()
# queue up the cached messages
self._plan_stack.append(new_plan)
self._response_stack.append(None)
self._plan_stack.append(single_gen(
Msg('rewindable', None, self.rewindable)))
self._response_stack.append(None)
# if there is a post plan add it between the wait
# and the cached messages
if post_plan is not None:
if callable(post_plan):
post_plan = post_plan()
self._plan_stack.append(ensure_generator(post_plan))
self._response_stack.append(None)
# tell the devices they are ready to go again
self._plan_stack.append(single_gen(Msg('resume', None, )))
self._response_stack.append(None)
# add the wait on the future to the stack
self._plan_stack.append(single_gen(Msg('wait_for', None, [fut, ])))
self._response_stack.append(None)
# if there is a pre plan add on top of the wait
if pre_plan is not None:
if callable(pre_plan):
pre_plan = pre_plan()
self._plan_stack.append(ensure_generator(pre_plan))
self._response_stack.append(None)
self._plan_stack.append(single_gen(
Msg('rewindable', None, False)))
self._response_stack.append(None)
# The event loop is still running. The pre_plan will be processed,
# and then the RunEngine will be hung up on processing the
# 'wait_for' message until `fut` is set.
if not self._state == 'paused':
self._state = 'suspending'
                # bump the _run task out of whatever it is awaiting
self._task.cancel()
self.loop.call_soon_threadsafe(
self.loop.create_task,
_request_suspend(pre_plan, post_plan, justification))
def abort(self, reason=''):
"""
Stop a running or paused plan and mark it as aborted.
See Also
--------
:meth:`RunEngine.halt`
:meth:`RunEngine.stop`
"""
return self.__interrupter_helper(self._abort_coro(reason))
async def _abort_coro(self, reason):
if self._state.is_idle:
raise TransitionError("RunEngine is already idle.")
print("Aborting: running cleanup and marking "
"exit_status as 'abort'...")
self._interrupted = True
self._reason = reason
self._exit_status = 'abort'
was_paused = self._state == 'paused'
self._state = 'aborting'
if was_paused:
with self._state_lock:
self._exception = RequestAbort()
else:
self._task.cancel()
for task in self._status_tasks:
task.cancel()
return tuple(self._run_start_uids)
def stop(self):
"""
Stop a running or paused plan, but mark it as successful (not aborted).
See Also
--------
:meth:`RunEngine.abort`
:meth:`RunEngine.halt`
"""
return self.__interrupter_helper(self._stop_coro())
async def _stop_coro(self):
if self._state.is_idle:
raise TransitionError("RunEngine is already idle.")
print("Stopping: running cleanup and marking exit_status "
"as 'success'...")
self._interrupted = True
was_paused = self._state == 'paused'
self._state = 'stopping'
if not was_paused:
self._task.cancel()
return tuple(self._run_start_uids)
def halt(self):
'''
Stop the running plan and do not allow the plan a chance to clean up.
See Also
--------
:meth:`RunEngine.abort`
:meth:`RunEngine.stop`
'''
return self.__interrupter_helper(self._halt_coro())
def __interrupter_helper(self, coro):
if self.state == 'panicked':
raise RuntimeError("The RunEngine is panicked and "
"cannot be recovered. "
"You must restart bluesky.")
coro_event = threading.Event()
task = None
def end_cb(fut):
coro_event.set()
def start_task():
nonlocal task
task = self.loop.create_task(coro)
task.add_done_callback(end_cb)
was_paused = self._state == 'paused'
self.loop.call_soon_threadsafe(start_task)
coro_event.wait()
if was_paused:
self._resume_task()
return task.result()
async def _halt_coro(self):
if self._state.is_idle:
raise TransitionError("RunEngine is already idle.")
print("Halting: skipping cleanup and marking exit_status as "
"'abort'...")
self._interrupted = True
with self._state_lock:
self._exception = PlanHalt()
self._exit_status = 'abort'
was_paused = self._state == 'paused'
self._state = 'halting'
if not was_paused:
self._task.cancel()
return tuple(self._run_start_uids)
def _stop_movable_objects(self, *, success=True):
"Call obj.stop() for all objects we have moved. Log any exceptions."
for obj in self._movable_objs_touched:
try:
stop = obj.stop
except AttributeError:
self.log.debug("No 'stop' method available on %r", obj)
else:
try:
stop(success=success)
except Exception:
self.log.exception("Failed to stop %r.", obj)
async def _run(self):
"""Pull messages from the plan, process them, send results back.
Upon exit, clean up.
- Call stop() on all objects that were 'set' or 'kickoff'.
- Try to collect any uncollected flyers.
- Try to unstage any devices left staged by the plan.
- Try to remove any monitoring subscriptions left on by the plan.
- If interrupting the middle of a run, try to emit a RunStop document.
"""
await self._run_permit.wait()
# grab the current task. We need to do this here because the
# object returned by `run_coroutine_threadsafe` is a future
# that acts as a proxy that does not have the correct behavior
# when `.cancel` is called on it.
with self._state_lock:
self._task = current_task(self.loop)
stashed_exception = None
debug = logging.getLogger('{}.msg'.format(self.log.name)).debug
self._reason = ''
        # sentinel to decide whether we need to add to the response stack or not
sentinel = object()
try:
self._state = 'running'
while True:
if self._state in ('pausing', 'suspending'):
if not self.resumable:
self._run_permit.set()
stashed_exception = FailedPause()
for task in self._status_tasks:
task.cancel()
self._state = 'aborting'
continue
# currently only using 'suspending' to get us into the
# block above, we do not have a 'suspended' state
# (yet)
if self._state == 'suspending':
self._state = 'running'
if not self._run_permit.is_set():
# A pause has been requested. First, put everything in a
# resting state.
assert self._state == 'pausing'
# Remove any monitoring callbacks, but keep refs in
# self._monitor_params to re-instate them later.
for obj, (cb, kwargs) in self._monitor_params.items():
obj.clear_sub(cb)
# During pause, all motors should be stopped. Call stop()
# on every object we ever set().
self._stop_movable_objects(success=True)
# Notify Devices of the pause in case they want to
# clean up.
for obj in self._objs_seen:
if hasattr(obj, 'pause'):
try:
obj.pause()
except NoReplayAllowed:
self._reset_checkpoint_state_meth()
self._state = 'paused'
# Let RunEngine.__call__ return...
self._blocking_event.set()
await self._run_permit.wait()
if self._state == 'paused':
# may be called by 'resume', 'stop', 'abort', 'halt'
self._state = 'running'
# If we are here, we have come back to life either to
# continue (resume) or to clean up before exiting.
assert len(self._response_stack) == len(self._plan_stack)
# set resp to the sentinel so that if we fail in the sleep
# we do not add an extra response
resp = sentinel
try:
# the new response to be added
new_response = None
# This 'await' must be here to ensure that
# this coroutine breaks out of its current behavior
# before trying to get the next message from the
# top of the generator stack in case there has
# been a pause requested. Without this the next
# message after the pause may be processed first
# on resume (instead of the first message in
# self._msg_cache).
# This sleep has to be inside of this try block so
# that any of the 'async' exceptions get thrown in the
# correct place
await asyncio.sleep(0, loop=self.loop)
# always pop off a result, we are either sending it back in
# or throwing an exception in, in either case the left hand
# side of the yield in the plan will be moved past
resp = self._response_stack.pop()
# if any status tasks have failed, grab the exceptions.
# give priority to things pushed in from outside
with self._state_lock:
if self._exception is not None:
stashed_exception = self._exception
self._exception = None
# The case where we have a stashed exception
if (stashed_exception is not None or
isinstance(resp, Exception)):
# throw the exception at the current plan
try:
msg = self._plan_stack[-1].throw(
stashed_exception or resp)
except Exception as e:
# The current plan did not handle it,
# maybe the next plan (if any) would like
# to try
self._plan_stack.pop()
# we have killed the current plan, do not give
# it a new response
resp = sentinel
if len(self._plan_stack):
stashed_exception = e
continue
# no plans left and still an unhandled exception
# re-raise to exit the infinite loop
else:
raise
# clear the stashed exception, the top plan
# handled it.
else:
stashed_exception = None
# The normal case of clean operation
else:
try:
msg = self._plan_stack[-1].send(resp)
# We have exhausted the top generator
except StopIteration:
                            # pop the dead generator and go back to the top
self._plan_stack.pop()
# we have killed the current plan, do not give
# it a new response
resp = sentinel
if len(self._plan_stack):
continue
# or reraise to get out of the infinite loop
else:
raise
# Any other exception that comes out of the plan
except Exception as e:
# pop the dead plan, stash the exception and
# go to the top of the loop
self._plan_stack.pop()
# we have killed the current plan, do not give
# it a new response
resp = sentinel
if len(self._plan_stack):
stashed_exception = e
continue
# or reraise to get out of the infinite loop
else:
raise
# if we have a message hook, call it
if self.msg_hook is not None:
self.msg_hook(msg)
debug(msg)
# update the running set of all objects we have seen
self._objs_seen.add(msg.obj)
# if this message can be cached for rewinding, cache it
if (self._msg_cache is not None and
self._rewindable_flag and
msg.command not in self._UNCACHEABLE_COMMANDS):
# We have a checkpoint.
self._msg_cache.append(msg)
# try to look up the coroutine to execute the command
try:
coro = self._command_registry[msg.command]
# replace KeyError with a local sub-class and go
# to top of the loop
except KeyError:
# TODO make this smarter
new_response = InvalidCommand(msg.command)
continue
# try to finally run the command the user asked for
try:
# this is one of two places that 'async'
# exceptions (coming in via throw) can be
# raised
new_response = await coro(msg)
# special case `CancelledError` and let the outer
# exception block deal with it.
except asyncio.CancelledError:
raise
# any other exception, stash it and go to the top of loop
except Exception as e:
new_response = e
continue
# normal use, if it runs cleanly, stash the response and
# go to the top of the loop
else:
continue
except KeyboardInterrupt:
# This only happens if some external code captures SIGINT
# -- overriding the RunEngine -- and then raises instead
# of (properly) calling the RunEngine's handler.
# See https://github.com/NSLS-II/bluesky/pull/242
print("An unknown external library has improperly raised "
"KeyboardInterrupt. Intercepting and triggering "
"a HALT.")
await self._halt_coro()
except asyncio.CancelledError as e:
if self._state == 'pausing':
# if we got a CancelledError and we are in the
# 'pausing' state clear the run permit and
# bounce to the top
self._run_permit.clear()
continue
if self._state in ('halting', 'stopping', 'aborting'):
                        # if we got this while tearing down, just keep going
exception_map = {'halting': PlanHalt,
'stopping': RequestStop,
'aborting': RequestAbort}
# if the exception is not set bounce to the top
if stashed_exception is None:
stashed_exception = exception_map[self.state]
continue
if self._state == 'suspending':
# just bounce to the top
continue
# if we are handling this twice, raise and leave the plans
# alone
if stashed_exception is e:
raise e
# the case where FailedPause, RequestAbort or a coro
# raised error is not already stashed in _exception
if stashed_exception is None:
stashed_exception = e
finally:
                    # if we popped a response and did not pop a plan, we need
# to put the new response back on the stack
if resp is not sentinel:
self._response_stack.append(new_response)
except (StopIteration, RequestStop):
self._exit_status = 'success'
# TODO Is the sleep here necessary?
await asyncio.sleep(0, loop=self.loop)
except (FailedPause, RequestAbort, asyncio.CancelledError,
PlanHalt):
self._exit_status = 'abort'
# TODO Is the sleep here necessary?
await asyncio.sleep(0, loop=self.loop)
self.log.exception("Run aborted")
except GeneratorExit as err:
            self._exit_status = 'fail'  # Exception raised during 'running'
self._reason = str(err)
raise ValueError from err
except Exception as err:
            self._exit_status = 'fail'  # Exception raised during 'running'
self._reason = str(err)
self.log.exception("Run aborted")
raise err
finally:
# Some done_callbacks may still be alive in other threads.
# Block them from creating new 'failed status' tasks on the loop.
self._pardon_failures.set()
# call stop() on every movable object we ever set()
self._stop_movable_objects(success=True)
# Try to collect any flyers that were kicked off but not finished.
# Some might not support partial collection. We swallow errors.
for obj in list(self._uncollected):
try:
await self._collect(Msg('collect', obj))
except Exception:
self.log.exception("Failed to collect %r.", obj)
# in case we were interrupted between 'stage' and 'unstage'
for obj in list(self._staged):
try:
obj.unstage()
except Exception:
self.log.exception("Failed to unstage %r.", obj)
self._staged.remove(obj)
# Clear any uncleared monitoring callbacks.
for obj, (cb, kwargs) in list(self._monitor_params.items()):
try:
obj.clear_sub(cb)
except Exception:
self.log.exception("Failed to stop monitoring %r.", obj)
else:
del self._monitor_params[obj]
sys.stdout.flush()
# Emit RunStop if necessary.
if self._run_is_open:
try:
await self._close_run(Msg('close_run'))
except Exception:
self.log.error(
"Failed to close run %r.", self._run_start_uid)
for p in self._plan_stack:
try:
p.close()
except RuntimeError:
print('The plan {!r} tried to yield a value on close. '
'Please fix your plan.'.format(p))
self._state = 'idle'
self.log.info("Cleaned up from plan %r", self._plan)
if isinstance(stashed_exception, asyncio.CancelledError):
raise stashed_exception
async def _wait_for(self, msg):
"""Instruct the RunEngine to wait for futures
Expected message object is:
Msg('wait_for', None, awaitable_factories, **kwargs)
The keyword arguments will be passed through to `asyncio.wait`.
The callables in awaitable_factories must have the signature ::
def fut_fac() -> awaitable:
'This must work multiple times'
"""
futs, = msg.args
futs = [f() for f in futs]
await asyncio.wait(futs, loop=self.loop, **msg.kwargs)
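    # Illustrative sketch (not from the original source): a plan can hand
    # 'wait_for' a list of zero-argument factories, each returning a fresh
    # awaitable so the message can be safely re-run after a rewind. For
    # example, built around an asyncio.Event set by external code:
    #
    #     ev = asyncio.Event()
    #     def fut_fac():
    #         return ev.wait()      # must work when called multiple times
    #     yield Msg('wait_for', None, [fut_fac])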
async def _open_run(self, msg):
"""Instruct the RunEngine to start a new "run"
Expected message object is:
Msg('open_run', None, **kwargs)
where **kwargs are any additional metadata that should go into
the RunStart document
"""
if self._run_is_open:
raise IllegalMessageSequence("A 'close_run' message was not "
"received before the 'open_run' "
"message")
self._clear_run_cache()
self._run_start_uid = new_uid()
self._run_start_uids.append(self._run_start_uid)
# Run scan_id calculation method
self.md['scan_id'] = self.scan_id_source(self.md)
        # For the metadata below, gather info about the plan passed to self.__call__.
plan_type = type(self._plan).__name__
plan_name = getattr(self._plan, '__name__', '')
# Combine metadata, in order of decreasing precedence:
md = ChainMap(self._metadata_per_call, # from kwargs to self.__call__
msg.kwargs, # from 'open_run' Msg
{'plan_type': plan_type, # computed from self._plan
'plan_name': plan_name},
self.md) # stateful, persistent metadata
# The metadata is final. Validate it now, at the last moment.
# Use copy for some reasonable (admittedly not total) protection
# against users mutating the md with their validator.
self.md_validator(dict(md))
doc = dict(uid=self._run_start_uid, time=ttime.time(), **md)
await self.emit(DocumentNames.start, doc)
self.log.debug("Emitted RunStart (uid=%r)", doc['uid'])
await self._reset_checkpoint_state_coro()
# Emit an Event Descriptor for recording any interruptions as Events.
if self.record_interruptions:
self._interruptions_desc_uid = new_uid()
dk = {'dtype': 'string', 'shape': [], 'source': 'RunEngine'}
interruptions_desc = dict(time=ttime.time(),
uid=self._interruptions_desc_uid,
name='interruptions',
data_keys={'interruption': dk},
run_start=self._run_start_uid)
await self.emit(DocumentNames.descriptor, interruptions_desc)
return self._run_start_uid
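    # Illustrative note (not from the original source): in the ChainMap above
    # earlier maps win. For a hypothetical key 'operator', if self.md holds
    # {'operator': 'A'}, the 'open_run' Msg carries operator='B', and __call__
    # received operator='C', the emitted RunStart gets 'C'
    # (per-call kwargs > Msg kwargs > computed plan info > persistent md).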
async def _close_run(self, msg):
"""Instruct the RunEngine to write the RunStop document
Expected message object is:
Msg('close_run', None, exit_status=None, reason=None)
        if *exit_status* and *reason* are not provided, use the values
stashed on the RE.
"""
if not self._run_is_open:
raise IllegalMessageSequence("A 'close_run' message was received "
"but there is no run open. If this "
"occurred after a pause/resume, add "
"a 'checkpoint' message after the "
"'close_run' message.")
self.log.debug("Stopping run %r", self._run_start_uid)
# Clear any uncleared monitoring callbacks.
for obj, (cb, kwargs) in list(self._monitor_params.items()):
obj.clear_sub(cb)
del self._monitor_params[obj]
# Count the number of Events in each stream.
num_events = {}
for bundle_name, counter in self._sequence_counters.items():
if bundle_name is None:
# rare but possible via Msg('create', name='primary')
continue
num_events[bundle_name] = next(counter) - 1
reason = msg.kwargs.get('reason', None)
if reason is None:
reason = self._reason
exit_status = msg.kwargs.get('exit_status', None)
if exit_status is None:
exit_status = self._exit_status
doc = dict(run_start=self._run_start_uid,
time=ttime.time(), uid=new_uid(),
exit_status=exit_status,
reason=reason,
num_events=num_events)
self._clear_run_cache()
await self.emit(DocumentNames.stop, doc)
self.log.debug("Emitted RunStop (uid=%r)", doc['uid'])
await self._reset_checkpoint_state_coro()
return doc['run_start']
async def _create(self, msg):
"""Trigger the run engine to start bundling future obj.read() calls for
an Event document
Expected message object is:
Msg('create', None, name='primary')
Msg('create', name='primary')
Note that the `name` kwarg will be the 'name' field of the resulting
descriptor. So descriptor['name'] = msg.kwargs['name'].
Also note that changing the 'name' of the Event will create a new
Descriptor document.
"""
if not self._run_is_open:
raise IllegalMessageSequence("Cannot bundle readings without "
"an open run. That is, 'create' must "
"be preceded by 'open_run'.")
if self._bundling:
raise IllegalMessageSequence("A second 'create' message is not "
"allowed until the current event "
"bundle is closed with a 'save' or "
'drop' "message.")
self._read_cache.clear()
self._asset_docs_cache.clear()
self._objs_read.clear()
self._bundling = True
command, obj, args, kwargs = msg
try:
self._bundle_name = kwargs['name']
except KeyError:
try:
self._bundle_name, = args
except ValueError:
raise ValueError(
"Msg('create') now requires a stream name, given as "
"Msg('create', name) or Msg('create', name=name)") from None
async def _read(self, msg):
"""
Add a reading to the open event bundle.
Expected message object is:
Msg('read', obj)
"""
obj = msg.obj
# actually _read_ the object
ret = obj.read(*msg.args, **msg.kwargs)
if ret is None:
raise RuntimeError(
"The read of {nm} returned None. ".format(nm=obj.name) +
"This is a bug in your object implementation, "
"`read` must return a dictionary ")
if self._bundling:
# if the object is not in the _describe_cache, cache it
if obj not in self._describe_cache:
# Validate that there is no data key name collision.
data_keys = obj.describe()
self._describe_cache[obj] = data_keys
self._config_desc_cache[obj] = obj.describe_configuration()
self._cache_config(obj)
# check that current read collides with nothing else in
# current event
cur_keys = set(self._describe_cache[obj].keys())
for read_obj in self._objs_read:
# that is, field names
known_keys = self._describe_cache[read_obj].keys()
if set(known_keys) & cur_keys:
raise ValueError("Data keys (field names) from {0!r} "
"collide with those from {1!r}"
"".format(obj, read_obj))
# add this object to the cache of things we have read
self._objs_read.append(obj)
# Stash the results, which will be emitted the next time _save is
# called --- or never emitted if _drop is called instead.
self._read_cache.append(ret)
            # Ask the object for any resource or datum documents it has cached
# and cache them as well. Likewise, these will be emitted if and
# when _save is called.
if hasattr(obj, 'collect_asset_docs'):
self._asset_docs_cache.extend(
obj.collect_asset_docs(*msg.args, **msg.kwargs))
return ret
def _cache_config(self, obj):
"Read the object's configuration and cache it."
config_values = {}
config_ts = {}
for key, val in obj.read_configuration().items():
config_values[key] = val['value']
config_ts[key] = val['timestamp']
self._config_values_cache[obj] = config_values
self._config_ts_cache[obj] = config_ts
async def _monitor(self, msg):
"""
Monitor a signal. Emit event documents asynchronously.
A descriptor document is emitted immediately. Then, a closure is
defined that emits Event documents associated with that descriptor
from a separate thread. This process is not related to the main
bundling process (create/read/save).
Expected message object is:
Msg('monitor', obj, **kwargs)
Msg('monitor', obj, name='event-stream-name', **kwargs)
where kwargs are passed through to ``obj.subscribe()``
"""
obj = msg.obj
if msg.args:
raise ValueError("The 'monitor' Msg does not accept positional "
"arguments.")
kwargs = dict(msg.kwargs)
name = kwargs.pop('name', short_uid('monitor'))
if not self._run_is_open:
raise IllegalMessageSequence("A 'monitor' message was sent but no "
"run is open.")
if obj in self._monitor_params:
raise IllegalMessageSequence("A 'monitor' message was sent for {}"
"which is already monitored".format(
obj))
descriptor_uid = new_uid()
data_keys = obj.describe()
config = {obj.name: {'data': {}, 'timestamps': {}}}
config[obj.name]['data_keys'] = obj.describe_configuration()
for key, val in obj.read_configuration().items():
config[obj.name]['data'][key] = val['value']
config[obj.name]['timestamps'][key] = val['timestamp']
object_keys = {obj.name: list(data_keys)}
hints = {}
if hasattr(obj, 'hints'):
hints.update({obj.name: obj.hints})
desc_doc = dict(run_start=self._run_start_uid, time=ttime.time(),
data_keys=data_keys, uid=descriptor_uid,
configuration=config, hints=hints, name=name,
object_keys=object_keys)
self.log.debug("Emitted Event Descriptor with name %r containing "
"data keys %r (uid=%r)", name, data_keys.keys(),
descriptor_uid)
seq_num_counter = count(1)
def emit_event(*args, **kwargs):
# Ignore the inputs. Use this call as a signal to call read on the
# object, a crude way to be sure we get all the info we need.
data, timestamps = _rearrange_into_parallel_dicts(obj.read())
doc = dict(descriptor=descriptor_uid,
time=ttime.time(), data=data, timestamps=timestamps,
seq_num=next(seq_num_counter), uid=new_uid())
schema_validators[DocumentNames.event].validate(doc)
self.dispatcher.process(DocumentNames.event, doc)
self._monitor_params[obj] = emit_event, kwargs
await self.emit(DocumentNames.descriptor, desc_doc)
obj.subscribe(emit_event, **kwargs)
await self._reset_checkpoint_state_coro()
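    # Illustrative sketch (not from the original source): monitoring a signal
    # for part of a run, assuming `sig` supports subscribe()/clear_sub():
    #
    #     yield Msg('open_run')
    #     yield Msg('monitor', sig, name='sig_monitor')
    #     ...                    # other messages; Events emit asynchronously
    #     yield Msg('unmonitor', sig)
    #     yield Msg('close_run')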
async def _unmonitor(self, msg):
"""
Stop monitoring; i.e., remove the callback emitting event documents.
Expected message object is:
Msg('unmonitor', obj)
"""
obj = msg.obj
if obj not in self._monitor_params:
raise IllegalMessageSequence("Cannot 'unmonitor' %r; it is not "
"being monitored." % obj)
cb, kwargs = self._monitor_params[obj]
obj.clear_sub(cb)
del self._monitor_params[obj]
await self._reset_checkpoint_state_coro()
async def _save(self, msg):
"""Save the event that is currently being bundled
Expected message object is:
Msg('save')
"""
if not self._bundling:
raise IllegalMessageSequence("A 'create' message must be sent, to "
"open an event bundle, before that "
"bundle can be saved with 'save'.")
if not self._run_is_open:
# sanity check -- this should be caught by 'create' which makes
# this code path impossible
raise IllegalMessageSequence("A 'save' message was sent but no "
"run is open.")
# Short-circuit if nothing has been read. (Do not create empty Events.)
if not self._objs_read:
self._bundling = False
self._bundle_name = None
return
# The Event Descriptor is uniquely defined by the set of objects
# read in this Event grouping.
objs_read = frozenset(self._objs_read)
# Event Descriptor documents
desc_key = self._bundle_name
# This is a separate check because it can be reset on resume.
seq_num_key = desc_key
if seq_num_key not in self._sequence_counters:
counter = count(1)
counter_copy1, counter_copy2 = tee(counter)
self._sequence_counters[seq_num_key] = counter_copy1
self._teed_sequence_counters[seq_num_key] = counter_copy2
self._bundling = False
self._bundle_name = None
d_objs, doc = self._descriptors.get(desc_key, (None, None))
if d_objs is not None and d_objs != objs_read:
raise RuntimeError("Mismatched objects read, expected {!s}, "
"got {!s}".format(d_objs, objs_read))
if doc is None:
            # We do not have an Event Descriptor for this set.
data_keys = {}
config = {}
object_keys = {}
hints = {}
for obj in objs_read:
dks = self._describe_cache[obj]
name = obj.name
# dks is an OrderedDict. Record that order as a list.
object_keys[obj.name] = list(dks)
for field, dk in dks.items():
dk['object_name'] = name
data_keys.update(dks)
config[name] = {}
config[name]['data'] = self._config_values_cache[obj]
config[name]['timestamps'] = self._config_ts_cache[obj]
config[name]['data_keys'] = self._config_desc_cache[obj]
if hasattr(obj, 'hints'):
hints[name] = obj.hints
descriptor_uid = new_uid()
doc = dict(run_start=self._run_start_uid, time=ttime.time(),
data_keys=data_keys, uid=descriptor_uid,
configuration=config, name=desc_key,
hints=hints, object_keys=object_keys)
await self.emit(DocumentNames.descriptor, doc)
self.log.debug("Emitted Event Descriptor with name %r containing "
"data keys %r (uid=%r)", desc_key,
data_keys.keys(), descriptor_uid)
self._descriptors[desc_key] = (objs_read, doc)
descriptor_uid = doc['uid']
# Resource and Datum documents
for name, doc in self._asset_docs_cache:
# Add a 'run_start' field to the resource document on its way out.
if name == 'resource':
doc['run_start'] = self._run_start_uid
await self.emit(DocumentNames(name), doc)
# Event documents
seq_num = next(self._sequence_counters[seq_num_key])
event_uid = new_uid()
# Merge list of readings into single dict.
readings = {k: v for d in self._read_cache for k, v in d.items()}
for key in readings:
readings[key]['value'] = readings[key]['value']
data, timestamps = _rearrange_into_parallel_dicts(readings)
# Mark all externally-stored data as not filled so that consumers
        # know that the corresponding data are identifiers, not dereferenced
# data.
filled = {k: False
for k, v in
self._descriptors[desc_key][1]['data_keys'].items()
if 'external' in v}
doc = dict(descriptor=descriptor_uid,
time=ttime.time(), data=data, timestamps=timestamps,
seq_num=seq_num, uid=event_uid, filled=filled)
await self.emit(DocumentNames.event, doc)
self.log.debug("Emitted Event with data keys %r (uid=%r)", data.keys(),
event_uid)
async def _drop(self, msg):
"""Drop the event that is currently being bundled
Expected message object is:
Msg('drop')
"""
if not self._bundling:
raise IllegalMessageSequence("A 'create' message must be sent, to "
"open an event bundle, before that "
"bundle can be dropped with 'drop'.")
if not self._run_is_open:
# sanity check -- this should be caught by 'create' which makes
# this code path impossible
raise IllegalMessageSequence("A 'drop' message was sent but no "
"run is open.")
self._bundling = False
self._bundle_name = None
self.log.debug("Dropped open event bundle")
async def _kickoff(self, msg):
"""Start a flyscan object
Parameters
----------
msg : Msg
Special kwargs for the 'Msg' object in this function:
group : str
            The blocking group to add this flyer to
Expected message object is:
If `flyer_object` has a `kickoff` function that takes no arguments:
Msg('kickoff', flyer_object)
Msg('kickoff', flyer_object, group=<name>)
If `flyer_object` has a `kickoff` function that takes
`(start, stop, steps)` as its function arguments:
Msg('kickoff', flyer_object, start, stop, step)
Msg('kickoff', flyer_object, start, stop, step, group=<name>)
"""
if not self._run_is_open:
raise IllegalMessageSequence("A 'kickoff' message was sent but no "
"run is open.")
_, obj, args, kwargs = msg
self._uncollected.add(obj)
kwargs = dict(msg.kwargs)
group = kwargs.pop('group', None)
ret = obj.kickoff(*msg.args, **kwargs)
p_event = asyncio.Event(loop=self.loop)
pardon_failures = self._pardon_failures
def done_callback():
self.log.debug("The object %r reports 'kickoff' is done "
"with status %r", msg.obj, ret.success)
task = self._loop.call_soon_threadsafe(
self._status_object_completed, ret, p_event, pardon_failures)
self._status_tasks.append(task)
try:
ret.add_callback(done_callback)
except AttributeError:
# for ophyd < v0.8.0
ret.finished_cb = done_callback
self._groups[group].add(p_event.wait)
self._status_objs[group].add(ret)
return ret
async def _complete(self, msg):
"""
Tell a flyer, 'stop collecting, whenever you are ready'.
The flyer returns a status object. Some flyers respond to this
command by stopping collection and returning a finished status
        object immediately. Others continue on their given course and
        finish in their own time, irrespective of when this command is
        issued.
Expected message object is:
Msg('complete', flyer, group=<GROUP>)
where <GROUP> is a hashable identifier.
"""
kwargs = dict(msg.kwargs)
group = kwargs.pop('group', None)
ret = msg.obj.complete(*msg.args, **kwargs)
p_event = asyncio.Event(loop=self.loop)
pardon_failures = self._pardon_failures
def done_callback():
self.log.debug("The object %r reports 'complete' is done "
"with status %r", msg.obj, ret.success)
task = self._loop.call_soon_threadsafe(
self._status_object_completed, ret, p_event, pardon_failures)
self._status_tasks.append(task)
try:
ret.add_callback(done_callback)
except AttributeError:
# for ophyd < v0.8.0
ret.finished_cb = done_callback
self._groups[group].add(p_event.wait)
self._status_objs[group].add(ret)
return ret
async def _collect(self, msg):
"""
Collect data cached by a flyer and emit descriptor and event documents.
        Expected message object is
Msg('collect', obj)
Msg('collect', obj, stream=True)
"""
obj = msg.obj
if not self._run_is_open:
# sanity check -- 'kickoff' should catch this and make this
# code path impossible
raise IllegalMessageSequence("A 'collect' message was sent but no "
"run is open.")
self._uncollected.discard(obj)
if hasattr(obj, 'collect_asset_docs'):
# Resource and Datum documents
for name, doc in obj.collect_asset_docs():
# Add a 'run_start' field to the resource document on its way out.
if name == 'resource':
doc['run_start'] = self._run_start_uid
await self.emit(DocumentNames(name), doc)
named_data_keys = obj.describe_collect()
# e.g., {name_for_desc1: data_keys_for_desc1,
        #        name_for_desc2: data_keys_for_desc2, ...}
bulk_data = {}
local_descriptors = {} # hashed on obj_read, not (name, objs_read)
for stream_name, data_keys in named_data_keys.items():
desc_key = stream_name
d_objs = frozenset(data_keys)
if desc_key not in self._descriptors:
objs_read = d_objs
# We don't not have an Event Descriptor for this set.
descriptor_uid = new_uid()
object_keys = {obj.name: list(data_keys)}
hints = {}
if hasattr(obj, 'hints'):
hints.update({obj.name: obj.hints})
doc = dict(run_start=self._run_start_uid, time=ttime.time(),
data_keys=data_keys, uid=descriptor_uid,
name=stream_name, hints=hints,
object_keys=object_keys)
await self.emit(DocumentNames.descriptor, doc)
self.log.debug("Emitted Event Descriptor with name %r "
"containing data keys %r (uid=%r)", stream_name,
data_keys.keys(), descriptor_uid)
self._descriptors[desc_key] = (objs_read, doc)
self._sequence_counters[desc_key] = count(1)
else:
objs_read, doc = self._descriptors[desc_key]
if d_objs != objs_read:
raise RuntimeError("Mismatched objects read, "
"expected {!s}, "
"got {!s}".format(d_objs, objs_read))
descriptor_uid = doc['uid']
local_descriptors[objs_read] = (stream_name, descriptor_uid)
bulk_data[descriptor_uid] = []
# If stream is True, run 'event' subscription per document.
# If stream is False, run 'bulk_events' subscription once.
stream = msg.kwargs.get('stream', False)
for ev in obj.collect():
objs_read = frozenset(ev['data'])
stream_name, descriptor_uid = local_descriptors[objs_read]
seq_num = next(self._sequence_counters[stream_name])
event_uid = new_uid()
reading = ev['data']
for key in ev['data']:
reading[key] = reading[key]
ev['data'] = reading
ev['descriptor'] = descriptor_uid
ev['seq_num'] = seq_num
ev['uid'] = event_uid
if stream:
self.log.debug("Emitted Event with data keys %r (uid=%r)",
ev['data'].keys(), ev['uid'])
await self.emit(DocumentNames.event, ev)
else:
bulk_data[descriptor_uid].append(ev)
if not stream:
await self.emit(DocumentNames.bulk_events, bulk_data)
self.log.debug("Emitted bulk events for descriptors with uids "
"%r", bulk_data.keys())
async def _null(self, msg):
"""
A no-op message, mainly for debugging and testing.
"""
pass
async def _set(self, msg):
"""
Set a device and cache the returned status object.
Also, note that the device has been touched so it can be stopped upon
exit.
Expected message object is
Msg('set', obj, *args, **kwargs)
where arguments are passed through to `obj.set(*args, **kwargs)`.
"""
kwargs = dict(msg.kwargs)
group = kwargs.pop('group', None)
self._movable_objs_touched.add(msg.obj)
ret = msg.obj.set(*msg.args, **kwargs)
p_event = asyncio.Event(loop=self.loop)
pardon_failures = self._pardon_failures
def done_callback():
self.log.debug("The object %r reports set is done "
"with status %r", msg.obj, ret.success)
task = self._loop.call_soon_threadsafe(
self._status_object_completed, ret, p_event, pardon_failures)
self._status_tasks.append(task)
try:
ret.add_callback(done_callback)
except AttributeError:
# for ophyd < v0.8.0
ret.finished_cb = done_callback
self._groups[group].add(p_event.wait)
self._status_objs[group].add(ret)
return ret
async def _trigger(self, msg):
"""
Trigger a device and cache the returned status object.
Expected message object is:
Msg('trigger', obj)
"""
kwargs = dict(msg.kwargs)
group = kwargs.pop('group', None)
ret = msg.obj.trigger(*msg.args, **kwargs)
p_event = asyncio.Event(loop=self.loop)
pardon_failures = self._pardon_failures
def done_callback():
self.log.debug("The object %r reports trigger is "
"done with status %r.", msg.obj, ret.success)
task = self._loop.call_soon_threadsafe(
self._status_object_completed, ret, p_event, pardon_failures)
self._status_tasks.append(task)
try:
ret.add_callback(done_callback)
except AttributeError:
# for ophyd < v0.8.0
ret.finished_cb = done_callback
self._groups[group].add(p_event.wait)
self._status_objs[group].add(ret)
return ret
async def _wait(self, msg):
"""Block progress until every object that was triggered or set
with the keyword argument `group=<GROUP>` is done.
Expected message object is:
Msg('wait', group=<GROUP>)
where ``<GROUP>`` is any hashable key.
"""
if msg.args:
group, = msg.args
else:
group = msg.kwargs['group']
futs = list(self._groups.pop(group, []))
if futs:
status_objs = self._status_objs.pop(group)
try:
if self.waiting_hook is not None:
# Notify the waiting_hook function that the RunEngine is
# waiting for these status_objs to complete. Users can use
# the information these encapsulate to create a progress
# bar.
self.waiting_hook(status_objs)
await self._wait_for(Msg('wait_for', None, futs))
finally:
if self.waiting_hook is not None:
# Notify the waiting_hook function that we have moved on by
# sending it `None`. If all goes well, it could have
# inferred this from the status_obj, but there are edge
# cases.
self.waiting_hook(None)
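    # Illustrative sketch (not from the original source): pairing 'set' and
    # 'trigger' with a group so that 'wait' blocks until both are complete,
    # assuming `motor` is movable and `det` is triggerable:
    #
    #     yield Msg('set', motor, 5, group='A')
    #     yield Msg('trigger', det, group='A')
    #     yield Msg('wait', None, group='A')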
def _status_object_completed(self, ret, p_event, pardon_failures):
"""
Created as a task on the loop when a status object is finished
Parameters
----------
ret : status object
p_event : asyncio.Event
held in the RunEngine's self._groups cache for waiting
        pardon_failures : asyncio.Event
            tells us whether the __call__ that created this status object is over
"""
if not ret.success and not pardon_failures.is_set():
# TODO: need a better channel to move this information back
# to the run task.
with self._state_lock:
self._exception = FailedStatus(ret)
p_event.set()
async def _sleep(self, msg):
"""Sleep the event loop
Expected message object is:
Msg('sleep', None, sleep_time)
where `sleep_time` is in seconds
"""
await asyncio.sleep(*msg.args, loop=self.loop)
async def _pause(self, msg):
"""Request the run engine to pause
Expected message object is:
Msg('pause', defer=False, name=None, callback=None)
See RunEngine.request_pause() docstring for explanation of the three
keyword arguments in the `Msg` signature
"""
await self._request_pause_coro(*msg.args, **msg.kwargs)
async def _resume(self, msg):
"""Request the run engine to resume
Expected message object is:
Msg('resume', defer=False, name=None, callback=None)
See RunEngine.resume() docstring for explanation of the three
keyword arguments in the `Msg` signature
"""
# Re-instate monitoring callbacks.
for obj, (cb, kwargs) in self._monitor_params.items():
obj.subscribe(cb, **kwargs)
# Notify Devices of the resume in case they want to clean up.
for obj in self._objs_seen:
if hasattr(obj, 'resume'):
obj.resume()
async def _checkpoint(self, msg):
"""Instruct the RunEngine to create a checkpoint so that we can rewind
to this point if necessary
Expected message object is:
Msg('checkpoint')
"""
if self._bundling:
raise IllegalMessageSequence("Cannot 'checkpoint' after 'create' "
"and before 'save'. Aborting!")
await self._reset_checkpoint_state_coro()
if self._deferred_pause_requested:
# We are at a checkpoint; we are done deferring the pause.
# Give the _check_for_signals coroutine time to look for
# additional SIGINTs that would trigger an abort.
await asyncio.sleep(0.5, loop=self.loop)
await self._request_pause_coro(defer=False)
def _reset_checkpoint_state(self):
self._reset_checkpoint_state_meth()
def _reset_checkpoint_state_meth(self):
if self._msg_cache is None:
return
self._msg_cache = deque()
# Keep a safe separate copy of the sequence counters to use if we
# rewind and retake some data points.
for key, counter in list(self._sequence_counters.items()):
counter_copy1, counter_copy2 = tee(counter)
self._sequence_counters[key] = counter_copy1
self._teed_sequence_counters[key] = counter_copy2
_reset_checkpoint_state_coro = asyncio.coroutine(_reset_checkpoint_state)
async def _clear_checkpoint(self, msg):
"""Clear a set checkpoint
Expected message object is:
Msg('clear_checkpoint')
"""
# clear message cache
self._msg_cache = None
# clear stashed
self._teed_sequence_counters.clear()
async def _rewindable(self, msg):
'''Set rewindable state of RunEngine
Expected message object is:
Msg('rewindable', None, bool or None)
'''
rw_flag, = msg.args
if rw_flag is not None:
self.rewindable = rw_flag
return self.rewindable
async def _configure(self, msg):
"""Configure an object
Expected message object is:
Msg('configure', object, *args, **kwargs)
which results in this call:
object.configure(*args, **kwargs)
"""
if self._bundling:
raise IllegalMessageSequence(
"Cannot configure after 'create' but before 'save'"
"Aborting!")
_, obj, args, kwargs = msg
# Invalidate any event descriptors that include this object.
# New event descriptors, with this new configuration, will
# be created for any future event documents.
for name in list(self._descriptors):
obj_set, _ = self._descriptors[name]
if obj in obj_set:
del self._descriptors[name]
old, new = obj.configure(*args, **kwargs)
self._cache_config(obj)
return old, new
async def _stage(self, msg):
"""Instruct the RunEngine to stage the object
Expected message object is:
Msg('stage', object)
"""
_, obj, args, kwargs = msg
# If an object has no 'stage' method, assume there is nothing to do.
if not hasattr(obj, 'stage'):
return []
result = obj.stage()
self._staged.add(obj) # add first in case of failure below
await self._reset_checkpoint_state_coro()
return result
async def _unstage(self, msg):
"""Instruct the RunEngine to unstage the object
Expected message object is:
Msg('unstage', object)
"""
_, obj, args, kwargs = msg
# If an object has no 'unstage' method, assume there is nothing to do.
if not hasattr(obj, 'unstage'):
return []
result = obj.unstage()
# use `discard()` to ignore objects that are not in the staged set.
self._staged.discard(obj)
await self._reset_checkpoint_state_coro()
return result
async def _stop(self, msg):
"""
Stop a device.
Expected message object is:
Msg('stop', obj)
"""
return msg.obj.stop() # nominally, this returns None
async def _subscribe(self, msg):
"""
Add a subscription after the run has started.
This, like subscriptions passed to __call__, will be removed at the
end by the RunEngine.
Expected message object is:
Msg('subscribe', None, callback_function, document_name)
where `document_name` is one of:
{'start', 'descriptor', 'event', 'stop', 'all'}
and `callback_function` is expected to have a signature of:
``f(name, document)``
where name is one of the ``document_name`` options and ``document``
is one of the document dictionaries in the event model.
See the docstring of bluesky.run_engine.Dispatcher.subscribe() for more
information.
"""
self.log.debug("Adding subscription %r", msg)
_, obj, args, kwargs = msg
token = self.subscribe(*args, **kwargs)
self._temp_callback_ids.add(token)
await self._reset_checkpoint_state_coro()
return token
async def _unsubscribe(self, msg):
"""
Remove a subscription during a call -- useful for a multi-run call
where subscriptions are wanted for some runs but not others.
Expected message object is:
Msg('unsubscribe', None, TOKEN)
Msg('unsubscribe', token=TOKEN)
where ``TOKEN`` is the return value from ``RunEngine._subscribe()``
"""
self.log.debug("Removing subscription %r", msg)
_, obj, args, kwargs = msg
try:
token = kwargs['token']
except KeyError:
token, = args
self.unsubscribe(token)
self._temp_callback_ids.remove(token)
await self._reset_checkpoint_state_coro()
async def _input(self, msg):
"""
        Process an 'input' Msg. Expected Msg:
Msg('input', None)
Msg('input', None, prompt='>') # customize prompt
"""
prompt = msg.kwargs.get('prompt', '')
async_input = AsyncInput(self.loop)
async_input = functools.partial(async_input, end='', flush=True)
return (await async_input(prompt))
async def emit(self, name, doc):
"Process blocking callbacks and schedule non-blocking callbacks."
schema_validators[name].validate(doc)
self.dispatcher.process(name, doc)
class Dispatcher:
"""Dispatch documents to user-defined consumers on the main thread."""
def __init__(self):
self.cb_registry = CallbackRegistry(allowed_sigs=DocumentNames)
self._counter = count()
self._token_mapping = dict()
def process(self, name, doc):
"""
Dispatch document ``doc`` of type ``name`` to the callback registry.
Parameters
----------
name : {'start', 'descriptor', 'event', 'stop'}
doc : dict
"""
exceptions = self.cb_registry.process(name, name.name, doc)
for exc, traceback in exceptions:
warn("A %r was raised during the processing of a %s "
"Document. The error will be ignored to avoid "
"interrupting data collection. To investigate, "
"set RunEngine.ignore_callback_exceptions = False "
"and run again." % (exc, name.name))
def subscribe(self, func, name='all'):
"""
Register a callback function to consume documents.
.. versionchanged :: 0.10.0
The order of the arguments was swapped and the ``name``
argument has been given a default value, ``'all'``. Because the
meaning of the arguments is unambiguous (they must be a callable
and a string, respectively) the old order will be supported
indefinitely, with a warning.
Parameters
----------
func: callable
expecting signature like ``f(name, document)``
where name is a string and document is a dict
name : {'all', 'start', 'descriptor', 'event', 'stop'}, optional
the type of document this function should receive ('all' by
default).
Returns
-------
token : int
an integer ID that can be used to unsubscribe
See Also
--------
:meth:`Dispatcher.unsubscribe`
"""
if callable(name) and isinstance(func, str):
name, func = func, name
warn("The order of the arguments has been changed. Because the "
"meaning of the arguments is unambiguous, the old usage will "
"continue to work indefinitely, but the new usage is "
"encouraged: call subscribe(func, name) instead of "
"subscribe(name, func). Additionally, the 'name' argument "
"has become optional. Its default value is 'all'.")
if name == 'all':
private_tokens = []
for key in DocumentNames:
private_tokens.append(self.cb_registry.connect(key, func))
public_token = next(self._counter)
self._token_mapping[public_token] = private_tokens
return public_token
name = DocumentNames[name]
private_token = self.cb_registry.connect(name, func)
public_token = next(self._counter)
self._token_mapping[public_token] = [private_token]
return public_token
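    # Illustrative sketch (not from the original source): registering and
    # later removing a simple printing callback for 'start' documents, where
    # `dispatcher` stands in for a Dispatcher instance (e.g. RE.dispatcher):
    #
    #     def print_start(name, doc):
    #         print(name, doc['uid'])
    #
    #     token = dispatcher.subscribe(print_start, 'start')
    #     ...
    #     dispatcher.unsubscribe(token)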
def unsubscribe(self, token):
"""
Unregister a callback function using its integer ID.
Parameters
----------
token : int
the integer ID issued by :meth:`Dispatcher.subscribe`
See Also
--------
:meth:`Dispatcher.subscribe`
"""
for private_token in self._token_mapping[token]:
self.cb_registry.disconnect(private_token)
def unsubscribe_all(self):
"""Unregister all callbacks from the dispatcher
"""
for public_token in self._token_mapping.keys():
self.unsubscribe(public_token)
@property
def ignore_exceptions(self):
return self.cb_registry.ignore_exceptions
@ignore_exceptions.setter
def ignore_exceptions(self, val):
self.cb_registry.ignore_exceptions = val
def _rearrange_into_parallel_dicts(readings):
data = {}
timestamps = {}
for key, payload in readings.items():
data[key] = payload['value']
timestamps[key] = payload['timestamp']
return data, timestamps
PAUSE_MSG = """
Your RunEngine is entering a paused state. These are your options for changing
the state of the RunEngine:
RE.resume() Resume the plan.
RE.abort()     Perform cleanup, then kill plan. Mark exit_status='abort'.
RE.stop() Perform cleanup, then kill plan. Mark exit_status='success'.
RE.halt() Emergency Stop: Do not perform cleanup --- just stop.
"""
MAX_DEPTH_EXCEEDED_ERR_MSG = """
RunEngine.max_depth is set to {}; depth of {} was detected.
The RunEngine should not be called from inside another function. Doing so
breaks introspection tools and can result in unexpected behavior in the event
of an interruption. See documentation for more information and what to do
instead:
http://nsls-ii.github.io/bluesky/plans_intro.html#combining-plans
"""
def _default_md_validator(md):
if 'sample' in md and not (hasattr(md['sample'], 'keys') or
isinstance(md['sample'], str)):
raise ValueError(
"You specified 'sample' metadata. We give this field special "
"significance in order to make your data easily searchable. "
"Therefore, you must make 'sample' a string or a "
"dictionary, like so: "
"GOOD: sample='dirt' "
"GOOD: sample={'color': 'red', 'number': 5} "
"BAD: sample=[1, 2] ")
def _ensure_event_loop_running(loop):
"""
Run an asyncio event loop forever on a background thread.
This is idempotent: if the loop is already running nothing will be done.
"""
if not loop.is_running():
th = threading.Thread(target=loop.run_forever, daemon=True)
th.start()
_ensure_event_loop_running.loop_to_thread[loop] = th
else:
th = _ensure_event_loop_running.loop_to_thread[loop]
return th
_ensure_event_loop_running.loop_to_thread = weakref.WeakKeyDictionary()
_bluesky_event_loop = None
def get_bluesky_event_loop():
global _bluesky_event_loop
if _bluesky_event_loop is None:
_bluesky_event_loop = asyncio.new_event_loop()
return _bluesky_event_loop
def set_bluesky_event_loop(loop):
global _bluesky_event_loop
_bluesky_event_loop = loop
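# --- Illustrative usage sketch (not part of the original module) -----------
# A hedged example of the helpers above: obtain the shared bluesky event
# loop and make sure it is running on a background daemon thread.
if __name__ == "__main__":
    loop = get_bluesky_event_loop()
    thread = _ensure_event_loop_running(loop)
    print("bluesky event loop running on thread:", thread.name)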
|
dataset.py
|
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# Meng-Hao Guo <guomenghao1997@gmail.com>
# Dun Liang <randonlang@gmail.com>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import numpy as np
from urllib import request
import gzip
import pickle
import os
from jittor.dataset.utils import get_random_list, get_order_list, collate_batch
from collections.abc import Sequence, Mapping
import pathlib
from PIL import Image
from jittor_utils.ring_buffer import RingBuffer
import multiprocessing as mp
import signal
from jittor_utils import LOG
import jittor as jt
dataset_root = os.path.join(pathlib.Path.home(), ".cache", "jittor", "dataset")
mp_log_v = os.environ.get("mp_log_v", 0)
class Worker:
def __init__(self, target, args, buffer_size):
buffer = mp.Array('c', buffer_size, lock=False)
self.buffer = RingBuffer(buffer)
self.p = mp.Process(target=target, args=args+(self.buffer,))
self.p.daemon = True
self.p.start()
class Dataset(object):
'''
base class for reading data
Example:
class YourDataset(Dataset):
def __init__(self):
super().__init__()
self.set_attrs(total_len=1024)
def __getitem__(self, k):
return k, k*k
dataset = YourDataset().set_attrs(batch_size=256, shuffle=True)
for x, y in dataset:
......
'''
def __init__(self,
batch_size = 16,
shuffle = False,
drop_last = False,
num_workers = 0,
buffer_size = 512*1024*1024):
super().__init__()
self.total_len = None
self.batch_size = batch_size
self.shuffle = shuffle
self.drop_last = drop_last
self.num_workers = num_workers
self.buffer_size = buffer_size
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
assert self.total_len >= 0
assert self.batch_size > 0
if self.drop_last:
return self.total_len // self.batch_size
return (self.total_len-1) // self.batch_size + 1
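    # Illustrative note (not from the original source): with total_len=1030
    # and batch_size=256, len(dataset) is 1030 // 256 == 4 when drop_last is
    # True (the trailing 6 samples are discarded), and (1030-1) // 256 + 1 == 5
    # when drop_last is False (the last batch holds only 6 samples).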
def set_attrs(self, **kw):
'''set attributes of dataset, equivalent to setattr
Attrs:
batch_size(int): batch size, default 16.
            total_len(int): total length.
shuffle(bool): shuffle at each epoch, default False.
            drop_last(bool): if True, drop the last incomplete batch when
                total_len is not evenly divisible by batch_size; default False.
num_workers: number of workers for loading data
buffer_size: buffer size for each worker in bytes,
default(512MB).
'''
for k,v in kw.items():
assert hasattr(self, k), k
setattr(self, k, v)
return self
def to_jittor(self, batch):
if isinstance(batch, np.ndarray):
return jt.array(batch)
assert isinstance(batch, Sequence)
new_batch = []
for a in batch:
if isinstance(a, np.ndarray) or \
isinstance(a, int) or \
isinstance(a, float):
new_batch.append(jt.array(a))
else:
new_batch.append(a)
return new_batch
def collate_batch(self, batch):
return collate_batch(batch)
def terminate(self):
if hasattr(self, "workers"):
for w in self.workers:
w.p.terminate()
def _worker_main(self, worker_id, buffer):
try:
gid_obj = self.gid.get_obj()
gid_lock = self.gid.get_lock()
while True:
with gid_lock:
while gid_obj.value >= self.batch_len:
self.num_idle.value += 1
self.num_idle_c.notify()
self.gidc.wait()
self.num_idle.value -= 1
cid = gid_obj.value
self.idmap[cid] = worker_id
gid_obj.value += 1
self.gidc.notify()
batch = []
if mp_log_v:
print(f"#{worker_id} {os.getpid()} load batch", cid*self.batch_size, min(self.total_len, (cid+1)*self.batch_size))
for i in range(cid*self.batch_size, min(self.total_len, (cid+1)*self.batch_size)):
batch.append(self[self.index_list[i]])
batch = self.collate_batch(batch)
if mp_log_v:
print(f"#{worker_id} {os.getpid()} send", type(batch).__name__, [ type(b).__name__ for b in batch ], buffer)
buffer.send(batch)
except:
os.kill(os.getppid(), signal.SIGINT)
raise
def _stop_all_workers(self):
# wait until all workers idle
if self.num_idle.value < self.num_workers:
with self.gid.get_lock():
self.gid.get_obj().value = self.batch_len
if mp_log_v:
print("idle num", self.num_idle.value)
while self.num_idle.value < self.num_workers:
self.num_idle_c.wait()
if mp_log_v:
print("idle num", self.num_idle.value)
# clean workers' buffer
for w in self.workers:
w.buffer.clear()
def _init_workers(self):
self.index_list = mp.Array('i', self.total_len, lock=False)
workers = []
# batch id to worker id
self.idmap = mp.Array('i', self.batch_len, lock=False)
# global token index
self.gid = mp.Value('i', self.batch_len)
# global token index condition
self.gidc = mp.Condition(self.gid.get_lock())
# number of idle workers
self.num_idle = mp.Value('i', 0, lock=False)
# number of idle workers condition
self.num_idle_c = mp.Condition(self.gid.get_lock())
for i in range(self.num_workers):
w = Worker(target=self._worker_main, args=(i,),
buffer_size=self.buffer_size)
workers.append(w)
self.workers = workers
self.index_list_numpy = np.ndarray(dtype='int32', shape=self.total_len, buffer=self.index_list)
def __del__(self):
if mp_log_v:
print("dataset deleted")
self.terminate()
def __iter__(self):
if self.shuffle == False:
index_list = get_order_list(self.total_len)
else:
index_list = get_random_list(self.total_len)
self.batch_len = len(self)
if "batch_len" in os.environ:
self.batch_len = int(os.environ["batch_len"])
if not hasattr(self, "workers") and self.num_workers:
self._init_workers()
if self.num_workers:
self._stop_all_workers()
self.index_list_numpy[:] = index_list
gid_obj = self.gid.get_obj()
gid_lock = self.gid.get_lock()
with gid_lock:
gid_obj.value = 0
self.gidc.notify_all()
for i in range(self.batch_len):
# try not get lock first
if gid_obj.value <= i:
with gid_lock:
if gid_obj.value <= i:
if mp_log_v:
print("wait")
self.gidc.wait()
worker_id = self.idmap[i]
w = self.workers[worker_id]
if mp_log_v:
print(f"#{worker_id} {os.getpid()} recv buffer", w.buffer)
batch = w.buffer.recv()
if mp_log_v:
print(f"#{worker_id} {os.getpid()} recv", type(batch).__name__, [ type(b).__name__ for b in batch ])
batch = self.to_jittor(batch)
yield batch
else:
batch_data = []
for idx in index_list:
batch_data.append(self[int(idx)])
if len(batch_data) == self.batch_size:
batch_data = self.collate_batch(batch_data)
batch_data = self.to_jittor(batch_data)
yield batch_data
batch_data = []
# depend on drop_last
if not self.drop_last and len(batch_data) > 0:
batch_data = self.collate_batch(batch_data)
batch_data = self.to_jittor(batch_data)
yield batch_data
class ImageFolder(Dataset):
"""A image classify dataset, load image and label from directory:
root/label1/img1.png
root/label1/img2.png
...
root/label2/img1.png
root/label2/img2.png
...
Args:
root(string): Root directory path.
Attributes:
classes(list): List of the class names.
class_to_idx(dict): map from class_name to class_index.
imgs(list): List of (image_path, class_index) tuples
"""
def __init__(self, root, transform=None):
# import ipdb; ipdb.set_trace()
super().__init__()
self.root = root
self.transform = transform
self.classes = sorted([d.name for d in os.scandir(root) if d.is_dir()])
self.class_to_idx = {v:k for k,v in enumerate(self.classes)}
self.imgs = []
image_exts = set(('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'))
for i, class_name in enumerate(self.classes):
class_dir = os.path.join(root, class_name)
for dname, _, fnames in sorted(os.walk(class_dir, followlinks=True)):
for fname in sorted(fnames):
if os.path.splitext(fname)[-1].lower() in image_exts:
path = os.path.join(class_dir, fname)
self.imgs.append((path, i))
LOG.i(f"Found {len(self.classes)} classes and {len(self.imgs)} images.")
self.set_attrs(total_len=len(self.imgs))
def __getitem__(self, k):
with open(self.imgs[k][0], 'rb') as f:
img = Image.open(f).convert('RGB')
if self.transform:
img = self.transform(img)
return img, self.imgs[k][1]
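# --- Illustrative usage sketch (not part of the original module) -----------
# "/path/to/images" is a placeholder and must contain the root/label/img.png
# layout described in the ImageFolder docstring; the lambda transform turns
# each PIL image into a numpy array, and all images are assumed to share the
# same size so that collate_batch can stack them.
if __name__ == "__main__":
    dataset = ImageFolder("/path/to/images",
                          transform=lambda im: np.array(im))
    dataset.set_attrs(batch_size=32, shuffle=True, num_workers=0)
    for images, labels in dataset:
        print("batch:", images.shape, "labels:", labels.shape)
        break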
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
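# Worked check of the gain above (illustrative; assumes the filter is updated
# roughly every 0.5 s):
#   k = (dt/tau) / (dt/tau + 1) = (0.5/5) / (0.5/5 + 1) = 0.1 / 1.1 ≈ 0.091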
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
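# Trip conditions are OR'd in (|=): offroad for too long, low-passed car battery
# voltage below the pause threshold while the instantaneous reading is still above
# the hard floor, or the modeled car battery energy exhausted. Gate conditions are
# then AND'd in (&=): only disable while ignition is off, the user has not set
# DisablePowerDown, and the harness is connected. ForcePowerDown overrides everything.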
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= (pandaState.pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected)
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
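# Illustrative wiring (a minimal sketch, not part of the original module). The
# enclosing class is assumed to be instantiated once and fed pandaState messages
# from a polling loop; the names below (class, method, and message source) are
# assumptions for illustration only:
#
#   power_monitor = PowerMonitoring()                 # assumed class name
#   while True:
#       pandaState = get_panda_state()                # hypothetical message source
#       power_monitor.calculate(pandaState)           # assumed name of the integration method above
#       if power_monitor.should_shutdown(pandaState, offroad_timestamp, started_seen):
#           ...                                       # power the device down
#       time.sleep(0.5)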
|
WebcamVideoStream.py
|
# import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
def __init__(self, src=0, resolution=(1280, 720), framerate=32):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
self.stream.set(cv2.CAP_PROP_FPS, framerate)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
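# Example usage (a minimal sketch; assumes a webcam at index 0 and a local display,
# press 'q' to quit):
if __name__ == "__main__":
    vs = WebcamVideoStream(src=0).start()
    try:
        while True:
            frame = vs.read()
            if frame is None:
                continue
            cv2.imshow("frame", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        # signal the reader thread to exit and clean up the display window
        vs.stop()
        cv2.destroyAllWindows()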
|
cluster.py
|
# Copyright (c) 2015-2020 Avere Systems, Inc. All Rights Reserved.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
'''vFXT Cluster management
Cookbook/examples:
# A cluster is built with a service object (aws or gce)
service = vFXT.aws.Service() | vFXT.gce.Service()
# create a cluster
cluster = Cluster.create(service, ...)
# load from an existing, online cluster (queries xmlrpc)
cluster = Cluster.load(service, mgmt_ip='xxx', admin_password='xxx')
# offline with node instance ids provided
cluster = Cluster(service=service,
nodes=['node-1', 'node-2', 'node-3'],
admin_password='password',
mgmt_ip='10.10.10.10')
serializeme = cluster.export()
cluster = Cluster(service, **serializeme)
cluster.start()
cluster.stop()
cluster.restart()
cluster.destroy()
cluster.shelve()
cluster.unshelve()
cluster.is_on()
cluster.is_off()
cluster.is_shelved()
cluster.status()
cluster.wait_for_healthcheck()
cluster.wait_for_service_checks()
cluster.wait_for_cluster_activity()
cluster.wait_for_nodes_to_join()
cluster_cfg = cluster.cluster_config()
joincfg = cluster.cluster_config(joining=True)
cluster.in_use_addresses()
rpc = cluster.xmlrpc()
cluster.verify_license()
cluster.upgrade('http://path/to/armada.pkg')
# buckets
cluster.make_test_bucket(bucketname='unique_bucket', corefiler='cloudfiler')
# or
service.create_bucket('unique_bucket')
cluster.attach_bucket('cloudfiler', 'unique_bucket', master_password='mypassword')
cluster.add_vserver('vserver')
cluster.add_vserver_junction('vserver','cloudfiler')
# NFS filer
cluster.attach_corefiler('grapnel', 'grapnel.lab.avere.net')
cluster.add_vserver_junction('vserver', 'grapnel', path='/nfs', export='/vol/woodwardj')
# maint
cluster.enable_ha()
cluster.rebalance_directory_managers()
cluster.refresh()
cluster.reload()
# Full AWS example
cluster = Cluster.create(aws, 'r3.2xlarge', 'mycluster', 'PLACEHOLDER',
subnet='subnet-f99a618e',
placement_group='perf1',
wait_for_state='yellow')
try:
cluster.make_test_bucket(bucketname='mycluster-bucket', corefiler='aws')
cluster.add_vserver('vserver')
cluster.add_vserver_junction('vserver', 'aws')
except Exception as e:
cluster.destroy()
raise
'''
from builtins import range #pylint: disable=redefined-builtin
from future.utils import raise_from
import base64
import threading
import queue as Queue
import time
import logging
import uuid
import re
import socket
from xmlrpc.client import Fault as xmlrpclib_Fault
import math
import itertools
import vFXT.xmlrpcClt
from vFXT.serviceInstance import ServiceInstance
from vFXT.service import vFXTServiceFailure, vFXTConfigurationException, vFXTCreateFailure, vFXTStatusFailure, vFXTConnectionFailure, ServiceBase, validate_proxy
from vFXT.cidr import Cidr
log = logging.getLogger(__name__)
class Cluster(object): #pylint: disable=useless-object-inheritance
'''Cluster representation
Cluster composes the backend service object and performs all
operations through it or the XMLRPC client.
'''
CONFIGURATION_EXPIRATION = 1800
JOIN_CONFIGURATION_EXPIRATION = 7200
LICENSE_TIMEOUT = 120
def __init__(self, service, **options):
'''Constructor
The only required argument is the service backend.
To create a cluster, use Cluster.create()
To load a cluster, use Cluster.load()
Arguments:
service: the backend service
nodes ([], optional): optional list of node IDs
mgmt_ip (str, optional): management address
admin_password (str, optional): administration password
name (str, optional): cluster name
machine_type (str, optional): machine type of nodes in the cluster
mgmt_netmask (str, optional): netmask of management network
proxy_uri (str, optional): URI of proxy resource (e.g. http://user:pass@172.16.16.20:8080)
If called with mgmt_ip and admin_password, the cluster object will
query the management address and fill in all of the details required.
If called with just a list of node IDs, the cluster will lookup the
service instance backing objects associated with the node IDs.
This is handy for offline clusters.
'''
self.service = service
self.nodes = options.get('nodes', [])
self.mgmt_ip = options.get('mgmt_ip', None)
self.admin_password = options.get('admin_password', None)
self.name = options.get('name', None)
self.machine_type = options.get('machine_type', None)
self.mgmt_netmask = options.get('mgmt_netmask', None)
self.cluster_ip_start = options.get('cluster_ip_start', None)
self.cluster_ip_end = options.get('cluster_ip_end', None)
self.proxy = options.get('proxy_uri', None)
self.join_mgmt = True
self.trace_level = None
self.node_rename = True
self.first_node_error = None
self.timezone = None
self.instance_addresses = []
if self.proxy:
self.proxy = validate_proxy(self.proxy) # imported from vFXT.service
# we may be passed a list of instance IDs for offline clusters that we
# can't query
if self.service and self.nodes and all([not isinstance(i, ServiceInstance) for i in self.nodes]):
instances = []
for node_id in self.nodes:
log.debug("Loading node {}".format(node_id))
instance = service.get_instance(node_id)
if not instance:
raise vFXTConfigurationException("Unable to find instance {}".format(node_id))
instances.append(ServiceInstance(service=self.service, instance=instance))
self.nodes = instances
if self.mgmt_ip and self.admin_password and self.nodes and self.is_on():
# might as well if we can, otherwise use the load() constructor
self.load_cluster_information()
@classmethod
def create(cls, service, machine_type, name, admin_password, **options):
'''Create a cluster
Arguments:
service: the backend service
machine_type (str): service specific machine type
name (str): cluster name (used for all subsequent resource naming)
admin_password (str): administration password to assign to the cluster
wait_for_state (str, optional): red, yellow, green cluster state (defaults to yellow)
wait_for_state_duration (int, optional): number of seconds state must be maintained, defaults to 30
proxy_uri (str, optional): URI of proxy resource (e.g. http://user:pass@172.16.16.20:8080)
skip_cleanup (bool, optional): do not clean up on failure
management_address (str, optional): management address for the cluster
trace_level (str, optional): trace configuration
timezone (str, optional): Set cluster timezone
join_instance_address (bool, optional): Join cluster using instance rather than management address (defaults to True)
skip_node_renaming (bool, optional): Do not automatically configure and enforce node naming convention (defaults to False)
size (int, optional): size of cluster (node count), defaults to 3
root_image (str, optional): root disk image name
address_range_start (str, optional): The first of a custom range of addresses to use for the cluster
address_range_end (str, optional): The last of a custom range of addresses to use for the cluster
address_range_netmask (str, optional): cluster address range netmask
instance_addresses ([str], optional): list of instance IP addresses to assign to the cluster nodes
**options: passed to Service.create_cluster()
'''
c = cls(service)
c.admin_password = admin_password or '' # could be empty
c.machine_type = machine_type
c.name = name
c.proxy = options.get('proxy_uri', None)
c.trace_level = options.get('trace_level', None)
c.timezone = options.get('timezone', None)
c.join_mgmt = not options.get('join_instance_address', True)
if c.proxy:
c.proxy = validate_proxy(c.proxy) # imported from vFXT.service
if options.get('skip_node_renaming'):
c.node_rename = False
if not options.get('size'):
options['size'] = 3
cluster_size = int(options['size'])
if not name:
raise vFXTConfigurationException("A cluster name is required")
if not cls.valid_cluster_name(name):
raise vFXTConfigurationException("{} is not a valid cluster name".format(name))
if options.get('management_address'):
c.mgmt_ip = options.get('management_address')
if service.in_use_addresses('{}/32'.format(c.mgmt_ip)):
raise vFXTConfigurationException("The requested management address {} is already in use".format(c.mgmt_ip))
# Need to validate if instance_addresses passed in are already in use before creating the cluster
if options.get('instance_addresses'):
try:
already_in_use = []
for address in options['instance_addresses']:
if service.in_use_addresses('{}/32'.format(address)):
already_in_use.append(address)
if already_in_use:
raise vFXTConfigurationException("The requested instance addresses are already in use: {}".format(', '.join(already_in_use)))
if len(options['instance_addresses']) != cluster_size:
raise vFXTConfigurationException("Not enough instance addresses provided, require {}".format(cluster_size))
except vFXTConfigurationException:
raise
except Exception as e:
log.debug(e)
raise_from(vFXTConfigurationException("Invalid instance addresses: {}".format(options['instance_addresses'])), e)
c.instance_addresses = options['instance_addresses']
# determine how many addresses we need
instance_count = cluster_size if (service.ALLOCATE_INSTANCE_ADDRESSES and not c.instance_addresses) else 0
management_count = 0 if options.get('management_address') else 1
ip_count = cluster_size + instance_count + management_count
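# e.g. a default 3-node cluster on a service that allocates instance addresses,
# with no management_address override, needs 3 (cluster) + 3 (instance) + 1
# (management) = 7 contiguous addresses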
if all([options.get(_) for _ in ['address_range_start', 'address_range_end', 'address_range_netmask']]):
try:
already_in_use = []
cluster_range = Cidr.expand_address_range(options.get('address_range_start'), options.get('address_range_end'))
for address in cluster_range:
if c.service.in_use_addresses('{}/32'.format(address)):
already_in_use.append(address)
if already_in_use:
raise vFXTConfigurationException("The requested instance addresses are already in use: {}".format(', '.join(already_in_use)))
if len(cluster_range) < ip_count:
raise vFXTConfigurationException("Not enough addresses provided, require {}".format(ip_count))
log.debug("Using overrides for cluster management and address range")
if management_count:
c.mgmt_ip = cluster_range[0]
if instance_count:
c.instance_addresses = cluster_range[management_count:instance_count + management_count]
c.cluster_ip_start = cluster_range[management_count + instance_count]
c.cluster_ip_end = cluster_range[-1]
c.mgmt_netmask = options['address_range_netmask']
except vFXTConfigurationException:
raise
except Exception as e:
log.debug(e)
raise_from(vFXTConfigurationException("Invalid instance addresses: {}".format(options['instance_addresses'])), e)
else:
in_use_addresses = []
if c.mgmt_ip:
in_use_addresses.append(c.mgmt_ip)
if c.instance_addresses:
in_use_addresses.extend(c.instance_addresses)
avail, mask = service.get_available_addresses(count=ip_count, contiguous=True, in_use=in_use_addresses)
if management_count:
c.mgmt_ip = avail[0]
if instance_count:
c.instance_addresses = avail[management_count:instance_count + management_count]
c.cluster_ip_start = avail[management_count + instance_count]
c.cluster_ip_end = avail[-1]
c.mgmt_netmask = mask
# machine type is validated by service create_cluster
try:
service.create_cluster(c, **options)
if options.get('skip_configuration'):
return c
except KeyboardInterrupt:
if not options.get('skip_cleanup', False):
c.destroy(quick_destroy=True)
raise
try:
# any service specific instance checks should happen here... the checks
# might have to restart the nodes
c.wait_for_service_checks()
xmlrpc = c.xmlrpc()
retries = int(options.get('join_wait', 500 + (500 * math.log(len(c.nodes)))))
# should get all the nodes joined by now
c.allow_node_join(retries=retries, xmlrpc=xmlrpc)
c.wait_for_nodes_to_join(retries=retries, xmlrpc=xmlrpc)
c.allow_node_join(enable=False, retries=retries, xmlrpc=xmlrpc)
c.set_node_naming_policy(xmlrpc=xmlrpc)
if len(c.nodes) > 1:
c.enable_ha(xmlrpc=xmlrpc)
c.verify_license(xmlrpc=xmlrpc)
log.info("Waiting for cluster healthcheck")
c.wait_for_healthcheck(state=options.get('wait_for_state', 'yellow'),
duration=int(options.get('wait_for_state_duration', 30)), xmlrpc=xmlrpc)
except (KeyboardInterrupt, Exception) as e:
log.error("Cluster configuration failed: {}".format(e))
if not options.get('skip_cleanup', False):
c.destroy(quick_destroy=True)
else:
try:
c.telemetry()
except Exception as te:
log.debug(te)
raise_from(vFXTCreateFailure(e), e)
return c
def wait_for_healthcheck(self, state='green', retries=ServiceBase.WAIT_FOR_HEALTH_CHECKS, duration=1, conn_retries=1, xmlrpc=None):
'''Poll for cluster maxConditions
This requires the cluster to be on and be accessible via RPC
Arguments:
state (str='green'): red, yellow, green
retries (int, optional): number of retries
duration (int, optional): number of consecutive seconds condition was observed
conn_retries (int, optional): number of connection retries
xmlrpc (xmlrpcClt, optional): xmlrpc client
Sleeps Service.POLLTIME between each retry.
'''
retries = int(retries)
conn_retries = int(conn_retries)
duration = int(duration)
log.info("Waiting for healthcheck")
xmlrpc = self.xmlrpc(conn_retries) if xmlrpc is None else xmlrpc
start_time = int(time.time())
observed = 0 # observed time in the requested state
# cluster health check
acceptable_states = [state, 'green']
if state == 'red':
acceptable_states.append('yellow')
while True:
alertstats = {}
try:
alertstats = xmlrpc.cluster.maxActiveAlertSeverity()
except Exception as e:
log.debug("Ignoring cluster.maxActiveAlertSeverity() failure: {}".format(e))
xmlrpc = self.xmlrpc(conn_retries)
if 'maxCondition' in alertstats and alertstats['maxCondition'] in acceptable_states:
observed = int(time.time()) - start_time
if observed >= duration:
log.debug("{} for {}s({})... alertStats: {}".format(state, duration, observed, alertstats))
break
else:
observed = 0
start_time = int(time.time())
if retries % 10 == 0:
self._log_conditions(xmlrpc)
log.debug("Not {} for {}s({})... alertStats: {}".format(state, duration, observed, alertstats))
retries -= 1
if retries == 0:
alert_codes = []
try:
conditions = xmlrpc.alert.conditions()
alert_codes = [c['name'] for c in conditions if c['severity'] != state]
except Exception as e:
log.debug("Failed to get alert conditions: {}".format(e))
xmlrpc = self.xmlrpc(conn_retries)
if alert_codes:
raise vFXTStatusFailure("Healthcheck for state {} failed: {}".format(state, alert_codes))
raise vFXTStatusFailure("Healthcheck for state {} failed".format(state))
self._sleep()
@classmethod
def load(cls, service, mgmt_ip, admin_password):
'''Load an existing cluster over RPC
Arguments:
mgmt_ip (str): management address
admin_password (str): administration password
'''
cluster = cls(service)
cluster.mgmt_ip = mgmt_ip
cluster.admin_password = admin_password
cluster.load_cluster_information()
return cluster
def load_cluster_information(self):
'''Load cluster information through XMLRPC and the service backend
Raises: vFXTConfigurationException
'''
log.debug("Connecting to {} to load cluster data".format(self.mgmt_ip))
xmlrpc = self.xmlrpc()
cluster_data = self._xmlrpc_do(xmlrpc.cluster.get)
self.name = cluster_data['name']
self.mgmt_netmask = cluster_data['mgmtIP']['netmask']
expected_count = len(self._xmlrpc_do(xmlrpc.node.list))
log.debug("Loading {} nodes".format(self.name))
self.service.load_cluster_information(self)
if not self.nodes:
raise vFXTConfigurationException("No nodes found for cluster")
found_count = len(self.nodes)
if expected_count != found_count:
raise vFXTStatusFailure("Failed to load all {} nodes (found {})".format(expected_count, found_count))
def cluster_config(self, joining=False, expiration=CONFIGURATION_EXPIRATION, joining_expiration=JOIN_CONFIGURATION_EXPIRATION):
'''Return cluster configuration for master and slave nodes
Arguments:
joining (bool, optional): configuration for a joining node
expiration (int, optional): expiration for the full (initial) cluster configuration
joining_expiration (int, optional): expiration for a joining node configuration
Raises: vFXTConfigurationException
'''
if joining:
expiry = str(int(time.time()) + (joining_expiration or self.JOIN_CONFIGURATION_EXPIRATION))
mgmt_ip = (self.nodes[0].ip() if self.nodes and not self.join_mgmt else self.mgmt_ip)
return '# cluster.cfg\n[basic]\njoin cluster={}\nexpiration={}\n'.format(mgmt_ip, expiry)
expiry = str(int(time.time()) + (expiration or self.CONFIGURATION_EXPIRATION))
dns_servs = self.service.get_dns_servers()
ntp_servs = self.service.get_ntp_servers()
router = self.service.get_default_router()
if not all([self.mgmt_ip, self.mgmt_netmask, self.cluster_ip_start, self.cluster_ip_end]):
raise vFXTConfigurationException("Management IP/Mask and the cluster IP range is required")
# generate config
config = '''# cluster.cfg''' \
'''\n[basic]''' \
'''\ncluster name={}''' \
'''\npassword={}''' \
'''\nexpiration={}''' \
'''\n[management network]''' \
'''\naddress={}''' \
'''\nnetmask={}''' \
'''\ndefault router={}''' \
'''\n[cluster network]''' \
'''\nfirst address={}''' \
'''\nlast address={}''' \
.format(self.name,
self.admin_password,
expiry,
self.mgmt_ip,
self.mgmt_netmask,
router,
self.cluster_ip_start,
self.cluster_ip_end)
config += '\n[dns]\n'
dns_count = len(dns_servs)
for idx in range(3):
v = dns_servs[idx] if idx < dns_count else ''
config += 'server{}={}\n'.format(idx + 1, v)
config += 'domain=\n'
config += '\n[ntp]\n'
ntp_count = len(ntp_servs)
for idx in range(3):
v = ntp_servs[idx] if idx < ntp_count else ''
config += 'server{}={}\n'.format(idx + 1, v)
return config
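# For reference, a non-joining configuration rendered by the code above looks
# roughly like the following (addresses are illustrative placeholders):
#
#   # cluster.cfg
#   [basic]
#   cluster name=mycluster
#   password=<admin password>
#   expiration=<epoch + CONFIGURATION_EXPIRATION>
#   [management network]
#   address=10.10.10.10
#   netmask=255.255.255.0
#   default router=10.10.10.1
#   [cluster network]
#   first address=10.10.10.11
#   last address=10.10.10.20
#   [dns]
#   server1=10.10.10.2
#   server2=
#   server3=
#   domain=
#
#   [ntp]
#   server1=ntp.example.com
#   server2=
#   server3=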
def verify_license(self, wait=LICENSE_TIMEOUT, xmlrpc=None):
'''Verify a license has been provisioned for the cluster
Arguments:
wait (int): time to wait in seconds for the license provisioning (defaults to LICENSE_TIMEOUT, 120 seconds)
xmlrpc (xmlrpcClt, optional): xmlrpc client
Raises: vFXTConfigurationException
'''
if self.service.AUTO_LICENSE:
return
log.info('Waiting for FlashCloud licensing feature')
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
while wait > 0:
try:
licenses = xmlrpc.cluster.listLicenses()
if 'FlashCloud' in licenses['features']:
log.info('Feature FlashCloud enabled.')
return
except Exception as e:
log.debug(e)
if wait % 10 == 0:
log.debug('Waiting for the FlashCloud license feature to become enabled')
wait -= 1
self._sleep()
raise vFXTConfigurationException("Unable to verify cluster licensing")
def xmlrpc(self, retries=1, password=None):
'''Connect and return a new RPC connection object
Arguments:
retries (int, optional): number of retries
password (str, optional): defaults to the cluster admin_password
Raises: vFXTConnectionFailure
'''
addrs = []
if self.join_mgmt:
addrs.append(self.mgmt_ip)
if self.nodes:
addrs.append(self.nodes[0].ip())
if not addrs:
raise vFXTConfigurationException("No usable connection address for xmlrpc calls")
password = password or self.admin_password
if not password:
raise vFXTConnectionFailure("Unable to make remote API connection without a password")
while True:
# try our mgmt address or the first nodes instance address
for addr in addrs:
try:
xmlrpc = vFXT.xmlrpcClt.getXmlrpcClient("https://{}/cgi-bin/rpc2.py".format(addr), do_cert_checks=False)
xmlrpc('transport').user_agent = 'vFXT/{}'.format(vFXT.__version__)
xmlrpc.system.login(base64.b64encode('admin'.encode('utf-8')).decode(), base64.b64encode(password.encode('utf-8')).decode())
if addr != self.mgmt_ip and self.join_mgmt:
log.warning("Connected via instance address {} instead of management address {}".format(addr, self.mgmt_ip))
self._log_conditions(xmlrpc)
return xmlrpc
except Exception as e:
log.debug("Retrying failed XMLRPC connection to {}: {}".format(addr, e))
if retries == 0:
raise_from(vFXTConnectionFailure("Failed to make remote API connection: {}".format(e)), e)
retries -= 1
self._sleep()
def _xmlrpc_do(self, f, *args, **kwargs):
'''Run an xmlrpc function, retrying depending on the xmlrpc Fault
Arguments:
f (callable): rpc proxy function to call
*args: rpc arg list
**kwargs: rpc arg keywords
_xmlrpc_do_retries kwarg is special, defaults to XMLRPC_RETRIES
Retry errors include
100 AVERE_ERROR
102 AVERE_ENOENT
109 AVERE_EBUSY
'''
retry_errors = [100, 102, 109]
retries = kwargs.pop('_xmlrpc_do_retries', self.service.XMLRPC_RETRIES)
while True:
try:
return f(*args, **kwargs)
except xmlrpclib_Fault as e:
log.debug("avere xmlrpc failure: {}".format(e))
if retries == 0 or int(e.faultCode) not in retry_errors:
raise
except Exception as e:
log.debug("avere xmlrpc failure: {}".format(e))
if retries == 0:
raise
retries -= 1
self._sleep()
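# Typical call pattern (mirroring how the rest of this class uses this wrapper):
#   xmlrpc = self.xmlrpc()
#   cluster_data = self._xmlrpc_do(xmlrpc.cluster.get)
#   licenses = self._xmlrpc_do(xmlrpc.cluster.listLicenses, _xmlrpc_do_retries=5)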
def _xmlrpc_wait_for_activity(self, activity, error_msg, retries=None):
'''Wait for a xmlrpc activity to complete
Arguments:
activity (str): cluster activity UUID
error_msg (str): Exception text on error
retries (int, optional): max retries, otherwise loops indefinitely
'''
if activity == 'success':
return
xmlrpc = self.xmlrpc()
tries = 0
while True:
response = {}
try:
if xmlrpc is None:
xmlrpc = self.xmlrpc()
response = xmlrpc.cluster.getActivity(activity)
log.debug(response)
except Exception as e:
log.exception("Failed to get activity {}: {}".format(activity, e))
xmlrpc = None
if 'state' in response:
if response['state'] == 'success':
break
if response['state'] == 'failure':
err = '{}: {}'.format(error_msg, response.get('status', 'Unknown'))
raise vFXTConfigurationException(err)
if retries is not None:
if retries == 0:
err = '{}: Timed out while {}'.format(error_msg, response['status'])
raise vFXTConfigurationException(err)
retries -= 1
if tries % 10 == 0 and 'status' in response:
log.info(response['status'])
self._log_conditions(xmlrpc)
self._sleep()
tries += 1
def _enable_maintenance_api(self, xmlrpc):
response = self._xmlrpc_do(xmlrpc.system.enableAPI, 'maintenance')
if response != 'success':
raise vFXTConfigurationException('Failed to enable maintenance API')
@classmethod
def _log_conditions(cls, xmlrpc):
'''Debug log the conditions
This is useful when we are polling and want to show what is going
on with the cluster while we wait.
Arguments:
xmlrpc (xmlrpcClt): xmlrpc client
'''
if not log.isEnabledFor(logging.DEBUG):
return
try:
conditions = xmlrpc.alert.conditions()
log.debug("Current conditions: {}".format(conditions))
except Exception as e:
log.debug("Failed to get condition list: {}".format(e))
def telemetry(self, wait=True, retries=ServiceBase.WAIT_FOR_TELEMETRY, mode='gsimin'):
'''Kick off a minimal telemetry reporting
Arguments:
wait (bool, optional): wait until complete
retries (int, optional): number of retries to wait (if wait is disabled)
mode (str, optional): telemetry mode (valid from support.listNormalModes)
Raises vFXTStatusFailure on failure while waiting.
'''
if mode not in self.xmlrpc().support.listNormalModes()[0]:
raise vFXTConfigurationException("Invalid support mode {}".format(mode))
try:
log.info("Kicking off {} telemetry reporting.".format(mode))
response = self.xmlrpc().support.executeNormalMode('cluster', mode)
log.debug('{} response {}'.format(mode, response))
if not wait:
return
if response != 'success':
while True:
try:
is_done = self.xmlrpc().support.taskIsDone(response) # returns bool
if is_done:
break
except Exception as e:
log.debug("Error while checking for telemetry status: {}".format(e))
if retries % 10 == 0:
log.debug('Waiting for {} to complete'.format(response))
retries -= 1
if retries == 0:
raise vFXTConfigurationException("Time out waiting for telemetry upload to finish")
self._sleep()
except Exception as e:
log.debug("Telemetry failed: {}".format(e))
raise_from(vFXTStatusFailure('Telemetry failed: {}'.format(e)), e)
def upgrade_alternate_image(self, upgrade_url, retries=None):
'''Upgrade the cluster alternate image
Arguments:
upgrade_url (str): URL for armada package
retries (int, optional): retry count for switching active images
'''
retries = retries or int(500 + (500 * math.log(len(self.nodes))))
xmlrpc = self.xmlrpc()
cluster = self._xmlrpc_do(xmlrpc.cluster.get)
alt_image = cluster['alternateImage']
upgrade_status = self._xmlrpc_do(xmlrpc.cluster.upgradeStatus)
if not upgrade_status.get('allowDownload', False):
raise vFXTConfigurationException("Upgrade downloads are not allowed at this time")
# note any existing activities to skip
existing_activities = [a['id'] for a in self._xmlrpc_do(xmlrpc.cluster.listActivities)]
log.info("Fetching alternate image from {}".format(upgrade_url))
response = self._xmlrpc_do(xmlrpc.cluster.upgrade, upgrade_url)
if response != 'success':
raise vFXTConfigurationException("Failed to start upgrade download: {}".format(response))
op_retries = retries
while cluster['alternateImage'] == alt_image:
self._sleep()
try:
cluster = self._xmlrpc_do(xmlrpc.cluster.get)
activities = [act for act in self._xmlrpc_do(xmlrpc.cluster.listActivities)
if act['id'] not in existing_activities # skip existing
if act['process'] == 'Cluster upgrade' # look for cluster upgrade or download
or 'software download' in act['process']]
failures = [_ for _ in activities if 'failure' in _['state']]
if failures:
errmsg = ', '.join([': '.join([_['process'], _['status']]) for _ in failures])
raise vFXTConfigurationException("Failed to download upgrade image: {}".format(errmsg))
if op_retries % 10 == 0:
log.debug('Current activities: {}'.format(', '.join([act['status'] for act in activities])))
# check for double+ upgrade to same version
existing_ver_msg = 'Download {} complete'.format(alt_image)
if existing_ver_msg in [act['status'] for act in activities]:
log.debug("Redownloaded existing version")
break
except vFXTConfigurationException as e:
log.debug(e)
raise
except Exception as e:
if op_retries % 10 == 0:
log.debug("Retrying install check: {}".format(e))
op_retries -= 1
if op_retries == 0:
raise vFXTConnectionFailure("Timeout waiting for alternate image")
log.info("Updated alternate image to {}".format(cluster['alternateImage']))
def activate_alternate_image(self, retries=None, ha=True):
'''Activate the alternate image
Arguments:
retries (int, optional): retry count for switching active images (defaults to retrying indefinitely)
ha (bool, optional): do an HA upgrade, True
'''
cluster = self._xmlrpc_do(self.xmlrpc().cluster.get)
if cluster['alternateImage'] == cluster['activeImage']:
log.info("Skipping upgrade since this version is active")
return
alt_image = cluster['alternateImage']
if not ha: # if not HA, at least suspend the vservers
vservers = self._xmlrpc_do(self.xmlrpc().vserver.list)
for vserver in vservers:
log.info("Suspending vserver {} on cluster {}".format(vserver, cluster['name']))
activity = self._xmlrpc_do(self.xmlrpc().vserver.suspend, vserver)
self._xmlrpc_wait_for_activity(activity, "Failed to suspend vserver {}".format(vserver))
log.debug("Waiting for alternateImage to settle (FIXME)...")
self._sleep(15) # time to settle?
upgrade_status = self._xmlrpc_do(self.xmlrpc().cluster.upgradeStatus)
if not upgrade_status.get('allowActivate', False):
raise vFXTConfigurationException("Alternate image activation is not allowed at this time")
log.info("Activating alternate image")
response = self._xmlrpc_do(self.xmlrpc().cluster.activateAltImage, ha)
log.debug("activateAltImage response: {}".format(response))
existing_activities = [a['id'] for a in self._xmlrpc_do(self.xmlrpc().cluster.listActivities)]
log.debug("existing activities prior to upgrade: {}".format(existing_activities))
tries = 0
while cluster['activeImage'] != alt_image:
self._sleep()
try:
# we may end up with hung connections as our VIFs move...
def signal_handler(signum, stack):
log.debug("Signal handler for sig {}: {}".format(signum, stack))
raise vFXTConnectionFailure("Connection alarm raised")
import signal #pylint: disable=import-outside-toplevel
if hasattr(signal, 'alarm') and hasattr(signal, 'SIGALRM'):
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(60)
cluster = self._xmlrpc_do(self.xmlrpc().cluster.get)
activities = [act for act in self._xmlrpc_do(self.xmlrpc().cluster.listActivities)
if act['id'] not in existing_activities # skip existing
if act['process'] == 'Cluster upgrade' # look for cluster upgrade or activate
or 'software activate' in act['process']]
if 'failed' in [a['state'] for a in activities]:
raise vFXTConfigurationException("Failed to activate alternate image")
if tries % 10 == 0:
log.info('Waiting for active image to switch to {}'.format(alt_image))
activity_status = ', '.join([act['status'] for act in activities])
if activity_status:
log.debug('Current activities: {}'.format(activity_status))
tries += 1
except vFXTConfigurationException as e:
log.debug(e)
raise
except Exception as e:
log.debug("Retrying upgrade check: {}".format(e))
finally:
# reset SIGALRM handler
if hasattr(signal, 'alarm') and hasattr(signal, 'SIGALRM'):
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
if retries is not None:
retries -= 1
if retries == 0:
raise vFXTConnectionFailure("Timeout waiting for active image")
if not ha: # if not HA, we suspended the vservers.... undo here
vservers = self._xmlrpc_do(self.xmlrpc().vserver.list)
for vserver in vservers:
log.info("Unsuspending vserver {} on cluster {}".format(vserver, cluster['name']))
activity = self._xmlrpc_do(self.xmlrpc().vserver.unsuspend, vserver)
self._xmlrpc_wait_for_activity(activity, "Failed to unsuspend vserver {}".format(vserver))
log.info("Upgrade to {} complete".format(alt_image))
def upgrade(self, upgrade_url, retries=None, ha=True):
'''Upgrade a cluster from the provided URL
Arguments:
upgrade_url (str): URL for armada package
retries (int, optional): retry count for switching active images
ha (bool, optional): do an HA upgrade, True
Raises: vFXTConnectionFailure
'''
self.upgrade_alternate_image(upgrade_url, retries=retries)
self.activate_alternate_image(ha=ha, retries=retries)
def add_nodes(self, count=1, **options):
'''Add nodes to the cluster
This extends the address ranges of the cluster and all configured
vservers (if required) to accommodate the new nodes.
Arguments:
count (int, optional): number of nodes to add
skip_cleanup (bool, optional): do not clean up on failure
join_wait (int, optional): join wait time (defaults to wait_for_nodes_to_join default)
skip_node_renaming (bool, optional): Do not automatically configure and enforce node naming convention (defaults to False)
address_range_start (str, optional): Specify the first of a custom range of addresses to use
address_range_end (str, optional): Specify the last of a custom range of addresses to use
address_range_netmask (str, optional): Specify the netmask of the custom address range to use
vserver_home_addresses (bool, optional): Update address home configuration for all vservers
**options: options to pass to the service backend
Raises: vFXTCreateFailure
On failure, undoes cluster and vserver configuration changes.
'''
self.reload()
log.info("Extending cluster {} by {}".format(self.name, count))
node_count = len(self.nodes)
if not node_count:
raise vFXTConfigurationException("Cannot add a node to an empty cluster")
self.service._add_cluster_nodes_setup(self, count, **options)
# check to see if we can add nodes with the current licensing information
xmlrpc = self.xmlrpc()
license_data = self._xmlrpc_do(xmlrpc.cluster.listLicenses)
licensed_count = int(license_data['maxNodes'])
if (node_count + count) > licensed_count:
msg = "Cannot expand cluster to {} nodes as the current licensed maximum is {}"
raise vFXTConfigurationException(msg.format(node_count + count, licensed_count))
cluster_data = self._xmlrpc_do(xmlrpc.cluster.get)
cluster_ips_per_node = int(cluster_data['clusterIPNumPerNode'])
vserver_count = len(self._xmlrpc_do(xmlrpc.vserver.list))
existing_vserver = self.in_use_addresses('vserver', xmlrpc=xmlrpc)
existing_cluster = self.in_use_addresses('cluster', xmlrpc=xmlrpc)
need_vserver = ((node_count + count) * vserver_count) - len(existing_vserver)
need_cluster = ((node_count + count) * cluster_ips_per_node) - len(existing_cluster)
need_cluster = need_cluster if need_cluster > 0 else 0
need_vserver = need_vserver if need_vserver > 0 else 0
need_instance = count if self.service.ALLOCATE_INSTANCE_ADDRESSES else 0
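# e.g. growing a 3-node cluster that has one vserver and one cluster IP per node
# by 2 nodes (assuming the existing ranges hold one address per node) needs
# (5 * 1) - 3 = 2 more vserver addresses, (5 * 1) - 3 = 2 more cluster addresses,
# and 2 instance addresses if the service allocates them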
in_use_addrs = self.in_use_addresses(xmlrpc=xmlrpc)
if options.get('instance_addresses'):
# check that the instance addresses are not already used by the cluster
try:
existing = []
for address in options['instance_addresses']:
if address in in_use_addrs:
existing.append(address)
else:
# otherwise we should note our intent to use it
in_use_addrs.append(address)
# also check if another instance is using the address
if self.service.in_use_addresses('{}/32'.format(address)):
existing.append(address)
if existing:
raise vFXTConfigurationException("Instance addresses are already in use: {}".format(existing))
if len(options['instance_addresses']) < count:
raise vFXTConfigurationException("Not enough instance addresses provided, require {}".format(count))
except vFXTConfigurationException:
raise
except Exception as e:
log.debug(e)
raise_from(vFXTConfigurationException("Invalid instance addresses: {}".format(options['instance_addresses'])), e)
need_instance = 0
added = [] # cluster and vserver extensions (for undo)
ip_count = need_vserver + need_cluster + need_instance
if ip_count > 0: # if we need more, extend ourselves
custom_ip_config_reqs = ['address_range_start', 'address_range_end', 'address_range_netmask']
if all([options.get(_) for _ in custom_ip_config_reqs]):
avail_ips = Cidr.expand_address_range(options.get('address_range_start'), options.get('address_range_end'))
mask = options.get('address_range_netmask')
if len(avail_ips) < ip_count:
raise vFXTConfigurationException("Not enough addresses provided, require {}".format(ip_count))
if any([_ in in_use_addrs for _ in avail_ips]):
raise vFXTConfigurationException("Specified address range conflicts with existing cluster addresses")
existing = []
for address in avail_ips:
if self.service.in_use_addresses('{}/32'.format(address)):
existing.append(address)
if existing:
raise vFXTConfigurationException("Cluster addresses are already in use: {}".format(existing))
else:
avail_ips, mask = self.service.get_available_addresses(count=ip_count, contiguous=True, in_use=in_use_addrs)
if need_instance:
options['instance_addresses'] = avail_ips[0:need_instance]
del avail_ips[0:need_instance]
if need_cluster > 0:
addresses = avail_ips[0:need_cluster]
del avail_ips[0:need_cluster]
body = {'firstIP': addresses[0], 'netmask': mask, 'lastIP': addresses[-1]}
log.info("Extending cluster address range by {}".format(need_cluster))
log.debug("{}".format(body))
activity = self._xmlrpc_do(xmlrpc.cluster.addClusterIPs, body)
self._xmlrpc_wait_for_activity(activity, "Failed to extend cluster addresses")
added.append({'cluster': body})
if need_vserver > 0:
for vserver in self._xmlrpc_do(xmlrpc.vserver.list):
v_len = len([a for r in self._xmlrpc_do(xmlrpc.vserver.get, vserver)[vserver]['clientFacingIPs']
for a in range(Cidr.from_address(r['firstIP']), Cidr.from_address(r['lastIP']) + 1)])
to_add = (node_count + count) - v_len
if to_add < 1:
continue
addresses = avail_ips[0:to_add]
del avail_ips[0:to_add]
body = {'firstIP': addresses[0], 'netmask': mask, 'lastIP': addresses[-1]}
log.info("Extending vserver {} address range by {}".format(vserver, need_vserver))
log.debug("{}".format(body))
activity = self._xmlrpc_do(xmlrpc.vserver.addClientIPs, vserver, body)
self._xmlrpc_wait_for_activity(activity, "Failed to extend vserver {} addresses".format(vserver))
added.append({'vserver': body})
# now add the node(s)
try:
self.service.add_cluster_nodes(self, count, **options)
self.wait_for_service_checks()
# book keeping... may have to wait for a node to update image
wait = int(options.get('join_wait', 500 + (500 * math.log(count))))
self.allow_node_join(retries=wait)
self.wait_for_nodes_to_join(retries=wait)
self.allow_node_join(enable=False, retries=wait)
self.refresh()
self.enable_ha()
if not options.get('skip_node_renaming'):
self.set_node_naming_policy()
if options.get('vserver_home_addresses'):
self.vserver_home_addresses()
except (KeyboardInterrupt, Exception) as e:
log.error(e)
if options.get('skip_cleanup', False):
try:
self.telemetry()
except Exception as te:
log.debug(te)
raise_from(vFXTCreateFailure(e), e)
log.info("Undoing configuration changes for node addition")
# our current list
expected_nodes = [n.id() for n in self.nodes]
# refresh and get what the cluster sees
self.service.load_cluster_information(self)
joined_nodes = [n.id() for n in self.nodes]
# find the difference
unjoined = list(set(expected_nodes) ^ set(joined_nodes))
unjoined_nodes = [ServiceInstance(self.service, i) for i in unjoined]
# exclude those in the middle of joining
joining_node_addresses = [_['address'] for _ in self._xmlrpc_do(self.xmlrpc().node.listUnconfiguredNodes) if 'joining' in _['status']]
unjoined_nodes = [_ for _ in unjoined_nodes if _.ip() not in joining_node_addresses]
# destroy the difference
if unjoined_nodes:
try:
self.parallel_call(unjoined_nodes, 'destroy')
except Exception as destroy_e:
log.error('Failed to undo configuration: {}'.format(destroy_e))
# if we added no nodes successfully, clean up addresses added
none_joined = len(unjoined) == count
nothing_created = node_count == len(joined_nodes)
if none_joined or nothing_created:
for a in added:
if 'vserver' in a:
a = a['vserver']
for vserver in self._xmlrpc_do(self.xmlrpc().vserver.list):
for r in self._xmlrpc_do(self.xmlrpc().vserver.get, vserver)[vserver]['clientFacingIPs']:
if r['firstIP'] == a['firstIP'] and r['lastIP'] == a['lastIP']:
log.debug("Removing vserver range {}".format(r))
activity = self._xmlrpc_do(self.xmlrpc().vserver.removeClientIPs, vserver, r['name'])
try:
self._xmlrpc_wait_for_activity(activity, "Failed to undo vserver extension")
except Exception as e:
log.error(e)
if 'cluster' in a:
a = a['cluster']
for r in self._xmlrpc_do(self.xmlrpc().cluster.get)['clusterIPs']:
if r['firstIP'] == a['firstIP'] and r['lastIP'] == a['lastIP']:
log.debug("Removing cluster range {}".format(r))
try:
activity = self._xmlrpc_do(self.xmlrpc().cluster.removeClusterIPs, r['name'])
self._xmlrpc_wait_for_activity(activity, "Failed to undo cluster extension")
except Exception as e:
log.error(e)
raise_from(vFXTCreateFailure(e), e)
def parallel_call(self, serviceinstances, method, **options):
'''Run the named method across all nodes
A thread is spawned to run the method for each instance.
Arguments:
serviceinstances [ServiceInstance]: list of ServiceInstance objects
method (str): method to call on each ServiceInstance
Raises: vFXTServiceFailure
'''
threads = []
failq = Queue.Queue()
def thread_cb(service, instance_id, q):
'''thread callback'''
try:
# create the instance within the thread, retry initial load prior to calling the method
retries = service.CLOUD_API_RETRIES
while True:
try:
instance = ServiceInstance(service=service, instance_id=instance_id)
break
except Exception:
if retries == 0:
raise
retries -= 1
instance.__getattribute__(method)(**options)
except Exception as e:
log.error("Failed to {} {}: {}".format(method, instance_id, e))
if log.isEnabledFor(logging.DEBUG):
log.exception(e)
q.put(("Failed to {} instance {}".format(method, instance_id), e))
for si in serviceinstances:
t = threading.Thread(target=thread_cb, args=(si.service, si.instance_id, failq,))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
failed = []
while True:
try:
failed.append(failq.get_nowait())
except Queue.Empty:
break
if failed:
raise vFXTServiceFailure(failed)
def start(self):
'''Start all nodes in the cluster'''
self.parallel_call(self.nodes, 'start')
self.refresh()
def can_stop(self):
'''Some configurations cannot be stopped. Check if this is one.
'''
return all([_.can_stop() for _ in self.nodes])
def stop(self, clean_stop=True, retries=ServiceBase.WAIT_FOR_STOP):
'''Stop all nodes in the cluster
Arguments:
clean_stop (bool, optional): Issues cluster powerdown first (defaults to True)
retries (int, optional): number of retries (default 600)
'''
# we might be only a collection of nodes... make sure we have mgmt ip,
# password, etc... if so we power down the cluster before calling the
# service backend stop.
if clean_stop and (self.admin_password and self.nodes and self.is_on()):
# if we don't have the mgmt ip, use node1
if not self.mgmt_ip:
self.mgmt_ip = self.nodes[0].ip()
if not all([_.can_stop() for _ in self.nodes]):
raise vFXTConfigurationException("Node configuration prevents them from being stopped")
log.info("Powering down the cluster")
response = self._xmlrpc_do(self.xmlrpc().cluster.powerdown)
if response != 'success':
raise vFXTStatusFailure("Failed to power down the cluster: {}".format(response))
log.info("Waiting for cluster to go offline")
while self.is_on():
self._sleep()
self.refresh()
retries -= 1
if retries == 0:
raise vFXTStatusFailure("Timed out waiting for the cluster to go offline")
self.parallel_call(self.nodes, 'stop')
self.refresh()
def restart(self):
'''Calls stop and then start'''
self.stop()
self.start()
def destroy(self, **options):
'''Destroy the cluster
Arguments:
quick_destroy (bool, optional) skip cleanup steps that prevent data loss (defaults to False)
**options: passed to ServiceInstance.destroy()
'''
if not options.pop('quick_destroy', False) and self.is_on() and self.admin_password:
xmlrpc = self.xmlrpc()
cluster_name = self.name or 'unknown'
corefilers = {k: v for _ in self._xmlrpc_do(xmlrpc.corefiler.list) for k, v in self._xmlrpc_do(xmlrpc.corefiler.get, _).items()}
if corefilers:
# remove all junctions
for vserver in self._xmlrpc_do(xmlrpc.vserver.list):
log.info("Suspending vserver {} on cluster {}".format(vserver, cluster_name))
activity = self._xmlrpc_do(xmlrpc.vserver.suspend, vserver)
self._xmlrpc_wait_for_activity(activity, "Failed to suspend vserver {}".format(vserver))
for junction in self._xmlrpc_do(xmlrpc.vserver.listJunctions, vserver):
log.info("Removing junction {} from vserver {} on cluster {}".format(junction['path'], vserver, cluster_name))
activity = self._xmlrpc_do(xmlrpc.vserver.removeJunction, vserver, junction['path'])
self._xmlrpc_wait_for_activity(activity, "Failed to remove junction {} from vserver {}".format(junction['path'], vserver))
for corefiler, data in corefilers.items():
# try and call corefiler.flush, note this will raise vFXTConfigurationException
# on error... That will bubble up and prevent the rest of the destroy from
# completing
if data['type'] == 'cloud':
self.flush_corefiler(corefiler)
# otherwise remove corefilers to force a flush
log.info("Removing corefiler {} on cluster {}".format(corefiler, cluster_name))
self.remove_corefiler(corefiler)
if self.service.STOP_BEFORE_DELETE:
self.stop()
self.parallel_call(self.nodes, 'destroy', **options)
# any post destroy cleanup activities that may be remaining
self.service.post_destroy_cluster(self)
def shelve(self, **options):
'''Shelve all nodes in the cluster'''
# if we can make rpc calls, try to use maint.setShelve()
if not self.admin_password or not (self.nodes and self.is_on()):
raise vFXTConfigurationException('Unable to shelve cluster without xmlrpc connectivity')
# if we don't have the mgmt ip, use node1
if not self.mgmt_ip:
self.mgmt_ip = self.nodes[0].ip()
if not all([_.can_shelve() for _ in self.nodes]):
raise vFXTConfigurationException("Node configuration prevents them from being shelved")
try:
xmlrpc = self.xmlrpc()
corefilers = xmlrpc.corefiler.list()
if corefilers:
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.maint.suspendAccess)
self._xmlrpc_wait_for_activity(activity, "Failed to suspend access", retries=self.service.WAIT_FOR_SUCCESS)
for corefiler in corefilers:
log.debug("Flushing corefiler {}".format(corefiler))
self.flush_corefiler(corefiler)
except xmlrpclib_Fault as e:
if int(e.faultCode) != 108: # Method not supported
log.debug("Failed to flush corefilers: {}".format(e))
raise vFXTConfigurationException(e)
except Exception as e:
log.debug("Failed to flush corefilers: {}".format(e))
raise
try:
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
response = self._xmlrpc_do(xmlrpc.maint.setShelve)
if response != 'success':
raise vFXTConfigurationException('Failed to notify cluster of intent to shelve')
log.debug('Called maint.setShelve()')
except xmlrpclib_Fault as e:
if int(e.faultCode) != 108: # Method maint.setShelve not supported
raise
log.debug('maint.setShelve not supported in this release')
self.stop(clean_stop=options.get('clean_stop', True))
self.parallel_call(self.nodes, 'shelve', **options)
self.refresh()
def unshelve(self, **options):
'''Unshelve all nodes in the cluster'''
self.parallel_call(self.nodes, 'unshelve', **options)
self.refresh()
# we might be only a collection of nodes... make sure we have mgmt ip,
# password, etc... if so we wait at least until we have api connectivity
if self.mgmt_ip and self.admin_password and self.nodes and self.is_on():
self.wait_for_healthcheck(state='red', duration=1, conn_retries=ServiceBase.WAIT_FOR_SUCCESS)
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.maint.unsuspendAccess)
self._xmlrpc_wait_for_activity(activity, "Failed to unsuspend access", retries=self.service.WAIT_FOR_SUCCESS)
def is_on(self):
'''Returns true if all nodes are on'''
if self.nodes:
return all(i.is_on() for i in self.nodes)
return False
def is_off(self):
'''Returns true if all nodes are off'''
if self.nodes:
return all(i.is_off() for i in self.nodes)
return False
def is_shelved(self):
'''Returns true if all nodes are shelved'''
if self.is_off():
return all([n.is_shelved() for n in self.nodes])
else:
return False
def status(self):
'''Returns a list of node id:status'''
return [{n.id(): n.status()} for n in self.nodes]
def wait_for_service_checks(self):
'''Wait for Service checks to complete for all nodes
This may not be available for all backends and thus may be a noop.
'''
self.parallel_call(self.nodes, 'wait_for_service_checks')
def make_test_bucket(self, bucketname=None, corefiler=None, proxy=None, remove_on_fail=False, **options):
'''Create a test bucket for the cluster
Convenience wrapper function for testing. Calls create_bucket()
and then attach_bucket().
Arguments:
bucketname (str, optional): name of bucket or one is generated
corefiler (str, optional): name of corefiler or bucketname
proxy (str, optional): proxy configuration to use
remove_on_fail (bool, optional): remove the corefiler if the configuration does not finish
tags (dict, optional): tags with key/value labels to apply to the bucket (if supported)
**options: passed through to service.create_bucket and cluster.attach_bucket
Returns:
key (dict): encryption key for the bucket as returned from attach_bucket
'''
bucketname = bucketname or "{}-{}".format(self.name, str(uuid.uuid4()).lower().replace('-', ''))[0:63]
corefiler = corefiler or bucketname
self.service.create_bucket(bucketname, **options)
log.info("Created cloud storage {} ".format(bucketname))
return self.attach_bucket(corefiler, bucketname, proxy=proxy, remove_on_fail=remove_on_fail, **options)
def attach_bucket(self, corefiler, bucketname, master_password=None, credential=None, proxy=None, **options):
'''Attach a named bucket as core filer
Arguments:
corefiler (str): name of the corefiler to create
bucketname (str): name of existing bucket to attach
master_password (str, optional): otherwise cluster admin password is used
credential (str, optional): cloud credential or one is created or reused by the backing service
proxy (str, optional): proxy configuration to use
type (str, optional): type of corefiler (default 'cloud')
cloud_type (str, optional): cloud type (default 's3')
s3_type (str, optional): S3 type (default Service.S3TYPE_NAME)
https (str, optional): 'yes' or 'no' to use HTTPS (default 'yes')
crypto_mode (str, optional): crypto mode (default CBC-AES-256-HMAC-SHA-512)
compress_mode (str, optional): compression mode (default LZ4)
https_verify_mode (str, optional): DISABLED, OCSP, CRL, or OCSP_CRL
remove_on_fail (bool, optional): remove the corefiler if the configuration does not finish
existing_data (bool, optional): the bucket has existing data in it (defaults to False)
Returns:
key (dict): encryption key for the bucket if encryption is enabled
Raises: vFXTConfigurationException
'''
xmlrpc = self.xmlrpc()
if corefiler in self._xmlrpc_do(xmlrpc.corefiler.list):
raise vFXTConfigurationException("Corefiler {} exists".format(corefiler))
if not credential:
log.debug("Looking up credential as none was specified")
credential = self.service.authorize_bucket(self, bucketname, xmlrpc=xmlrpc)
log.debug("Using credential {}".format(credential))
# set proxy if provided
if not proxy:
if self.proxy:
proxy = self.proxy.hostname
data = {
'type': options.get('type') or 'cloud',
'cloudType': options.get('cloud_type') or self.service.COREFILER_TYPE,
'bucket': bucketname,
'cloudCredential': credential,
'https': options.get('https') or 'yes',
'sslVerifyMode': options.get('https_verify_mode') or 'OCSP_CRL',
'compressMode': options.get('compress_mode') or 'LZ4',
'cryptoMode': options.get('crypto_mode') or 'CBC-AES-256-HMAC-SHA-512',
'proxy': proxy or '',
'bucketContents': 'used' if options.get('existing_data', False) else 'empty',
}
if options.get('serverName'):
data['serverName'] = options.get('serverName')
if data['cloudType'] == 's3':
data['s3Type'] = options.get('s3_type') or self.service.S3TYPE_NAME
log.info("Creating corefiler {}".format(corefiler))
log.debug("corefiler.createCloudFiler options {}".format(data))
activity = None
retries = self.LICENSE_TIMEOUT
while True:
try:
activity = xmlrpc.corefiler.createCloudFiler(corefiler, data)
break
except xmlrpclib_Fault as e:
# These errors are non-fatal:
# This cluster is not licensed for cloud core filers. A FlashCloud license is required.
# Cannot modify while a group of nodes is joining
allowed_errors = ['a group of nodes is joining', 'A FlashCloud license is required']
if not any([_ in e.faultString for _ in allowed_errors]):
raise
log.debug("Waiting for error to clear: {}".format(e))
if retries == 0:
raise
retries -= 1
self._sleep()
self._xmlrpc_wait_for_activity(activity, "Failed to create corefiler {}".format(corefiler), retries=self.service.WAIT_FOR_SUCCESS)
def _cleanup():
# try and remove it
if options.get('remove_on_fail'):
try:
self.remove_corefiler(corefiler)
except Exception as e:
log.error("Failed to remove corefiler {}: {}".format(corefiler, e))
# we have to wait for the corefiler to show up... may be blocked by other things
# going on after corefiler.createCloudFiler completes.
retries = self.service.WAIT_FOR_SUCCESS
while True:
try:
if corefiler in xmlrpc.corefiler.list():
break
except xmlrpclib_Fault as xfe:
log.debug(xfe)
xmlrpc = self.xmlrpc()
log.debug("Waiting for corefiler to show up")
if retries == 0:
_cleanup()
raise vFXTConfigurationException('Failed to create corefiler {}: Not found'.format(corefiler))
if retries % 10 == 0:
self._log_conditions(xmlrpc)
retries -= 1
self._sleep()
if options.get('crypto_mode') != 'DISABLED':
if not master_password:
log.info("Generating master key for {} using the admin pass phrase".format(corefiler))
master_password = self.admin_password
else:
log.info("Generating master key for {} using the specified pass phrase".format(corefiler))
retries = self.service.XMLRPC_RETRIES
while True:
try:
key = xmlrpc.corefiler.generateMasterKey(corefiler, master_password)
if 'keyId' in key and 'recoveryFile' in key:
break
except Exception as e:
log.debug(e)
if retries == 0:
_cleanup()
raise vFXTConfigurationException('Failed to generate master key for {}: {}'.format(corefiler, e))
retries -= 1
self._sleep()
log.info("Activating master key {} (signature {}) for {}".format(key['keyId'], key['signature'], corefiler))
response = self._xmlrpc_do(xmlrpc.corefiler.activateMasterKey, corefiler, key['keyId'], key['recoveryFile'])
if response != 'success':
_cleanup()
raise vFXTConfigurationException('Failed to activate master key for {}: {}'.format(corefiler, response))
return key
def attach_corefiler(self, corefiler, networkname, **options):
'''Attach a Corefiler
Arguments:
corefiler (str): name of the corefiler to create
networkname (str): network reachable name/address of the filer
retries (int, optional): defaults to ServiceBase.WAIT_FOR_SUCCESS
remove_on_fail (bool, optional): remove if any post create check fails
ignore_warnings (bool, optional): ignore warnings during create, defaults to False
nfs_type (str, optional): specify the type of the NFS server
nfs_type can be one of:
NetappNonClustered
NetappClustered
EmcIsilon
Other (default)
Raises: vFXTConfigurationException
'''
if corefiler in self._xmlrpc_do(self.xmlrpc().corefiler.list):
raise vFXTConfigurationException("Corefiler {} exists".format(corefiler))
try:
socket.gethostbyname(networkname)
except Exception as e:
raise vFXTConfigurationException("Unknown host {}: {}".format(corefiler, e))
ignore_warnings = options.get('ignore_warnings') or False
create_options = {
'filerClass': options.get('nfs_type') or 'Other'
}
log.info("Creating corefiler {}".format(corefiler))
activity = self._xmlrpc_do(self.xmlrpc().corefiler.create, corefiler, networkname, ignore_warnings, create_options)
self._xmlrpc_wait_for_activity(activity, "Failed to create corefiler {}".format(corefiler), retries=self.service.WAIT_FOR_SUCCESS)
# we have to wait for the corefiler to show up... may be blocked by other things
# going on after corefiler.create completes.
retries = options.get('retries') or self.service.WAIT_FOR_SUCCESS
xmlrpc = self.xmlrpc()
while True:
try:
if corefiler in xmlrpc.corefiler.list():
break
except Exception: pass
log.debug("Waiting for corefiler to show up")
if retries == 0:
if options.get('remove_on_fail'):
try:
self.remove_corefiler(corefiler)
except Exception as e:
log.error("Failed to remove corefiler {}: {}".format(corefiler, e))
raise vFXTConfigurationException('Failed to create corefiler {}'.format(corefiler))
if retries % 10 == 0:
self._log_conditions(xmlrpc)
retries -= 1
self._sleep()
def remove_corefiler(self, corefiler):
'''Remove a corefiler
Arguments:
corefiler (str): the name of the corefiler
Raises vFXTConfigurationException
'''
try:
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.corefiler.remove, corefiler)
self._xmlrpc_wait_for_activity(activity, "Failed to remove corefiler {}".format(corefiler))
except vFXTConfigurationException as e:
log.debug(e)
raise
except Exception as e:
raise vFXTConfigurationException(e)
def flush_corefiler(self, corefiler):
'''Flush a corefiler
Arguments:
corefiler (str): the name of the corefiler
Raises vFXTConfigurationException
'''
try:
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.corefiler.flush, corefiler)
self._xmlrpc_wait_for_activity(activity, "Failed to flush corefiler {}".format(corefiler))
except xmlrpclib_Fault as e:
if int(e.faultCode) != 108: # Method not supported
raise vFXTConfigurationException(e)
except Exception as e:
raise vFXTConfigurationException(e)
def add_vserver(self, name, size=0, netmask=None, start_address=None, end_address=None, home_addresses=False, retries=ServiceBase.WAIT_FOR_OPERATION):
'''Add a Vserver
Arguments:
name (str): name of the vserver
size (int, optional): size of the vserver address range (defaults to cluster size)
netmask (str, optional): Network mask for the vserver range
start_address (str, optional): Starting network address for the vserver range
end_address (str, optional): Ending network address for the vserver range
retries (int, optional): number of retries
Calling with netmask, start_address, and end_address will define the vserver with
those values.
Otherwise, calling with or without a size leads to the addresses being determined via
get_available_addresses().
'''
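# Illustrative calls (hypothetical instance name and addresses):
#   cluster.add_vserver('vserver1', netmask='255.255.255.0',
#                       start_address='10.0.0.10', end_address='10.0.0.13')
#   cluster.add_vserver('vserver1', size=4)  # range chosen via get_available_addresses()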
if name in self._xmlrpc_do(self.xmlrpc().vserver.list):
raise vFXTConfigurationException("Vserver '{}' exists".format(name))
if not all([netmask, start_address, end_address]):
if any([netmask, start_address, end_address]):
log.warning("Ignoring address configuration because missing one of {}(start), {}(end), or {}(netmask)".format(start_address, end_address, netmask))
in_use_addrs = self.in_use_addresses()
vserver_ips, netmask = self.service.get_available_addresses(count=size or len(self.nodes), contiguous=True, in_use=in_use_addrs)
start_address = vserver_ips[0]
end_address = vserver_ips[-1]
else:
# Validate
vserver_ips = Cidr.expand_address_range(start_address, end_address)
if len(vserver_ips) < len(self.nodes):
log.warning("Adding vserver address range without enough addresses for all nodes")
log.info("Creating vserver {} ({}-{}/{})".format(name, start_address, end_address, netmask))
activity = self._xmlrpc_do(self.xmlrpc().vserver.create, name, {'firstIP': start_address, 'lastIP': end_address, 'netmask': netmask})
self._xmlrpc_wait_for_activity(activity, "Failed to create vserver {}".format(name), retries=retries)
# wait for vserver to become available
vserver_retries = retries
log.debug("Waiting for vserver '{}' to show up".format(name))
while True:
try:
if name in self._xmlrpc_do(self.xmlrpc().vserver.list):
break
if vserver_retries % 10 == 0:
log.debug("{} not yet configured".format(name))
except Exception as e:
log.debug(e)
vserver_retries -= 1
if vserver_retries == 0:
raise vFXTConfigurationException("Timed out waiting for vserver '{}' to show up.".format(name))
self._sleep()
if home_addresses:
self.vserver_home_addresses(name)
def add_vserver_junction(self, vserver, corefiler, path=None, export='/', subdir=None, retries=ServiceBase.EXTENDED_XMLRPC_RETRIES):
'''Add a Junction to a Vserver
Arguments:
vserver (str): name of the vserver
corefiler (str): name of the corefiler
path (str, optional): path of the junction (default /{corefiler})
export (str, optional): export path (default /)
subdir (str, optional): subdirectory within the export
retries (int, optional): number of retries
Raises: vFXTConfigurationException
'''
if not path:
path = '/{}'.format(corefiler)
if not path.startswith('/'):
#raise vFXTConfigurationException("Junction path must start with /: {}".format(path))
path = '/{}'.format(path)
advanced = {}
if subdir:
advanced['subdir'] = subdir
log.info("Waiting for corefiler exports to show up")
op_retries = self.service.WAIT_FOR_SUCCESS
while True:
try:
exports = self._xmlrpc_do(self.xmlrpc().nfs.listExports, vserver, corefiler)
if exports:
break
except Exception as e:
log.debug(e)
if op_retries == 0:
raise vFXTConfigurationException("Timed out waiting for {} exports".format(corefiler))
if op_retries % 10 == 0:
self._log_conditions(self.xmlrpc())
op_retries -= 1
self._sleep()
log.info("Creating junction {} to {} for vserver {}".format(path, corefiler, vserver))
try:
activity = self._xmlrpc_do(self.xmlrpc().vserver.addJunction, vserver, path, corefiler, export, advanced, _xmlrpc_do_retries=retries)
self._xmlrpc_wait_for_activity(activity, "Failed to add junction to {}".format(vserver))
except Exception as e:
raise vFXTConfigurationException("Failed to add junction to {}: {}".format(vserver, e))
log.debug("Junctioned vserver {} with corefiler {} (path {}, export {})".format(vserver, corefiler, path, export))
def wait_for_nodes_to_join(self, retries=ServiceBase.WAIT_FOR_HEALTH_CHECKS, xmlrpc=None):
'''Check that the cluster configuration matches the nodes in this
object; otherwise, wait for the remaining nodes to join
Arguments:
retries (int): number of retries (default 600)
xmlrpc (xmlrpcClt, optional): xmlrpc client
Raises: vFXTConfigurationException
'''
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
expected = len(self.nodes)
if expected > len(self._xmlrpc_do(xmlrpc.node.list)):
log.info("Waiting for all nodes to join")
start_time = int(time.time())
node_addresses = [n.ip() for n in self.nodes]
while True:
found = 1 # have to find one node at least
try:
found = len(self._xmlrpc_do(xmlrpc.node.list))
if expected == found:
log.debug("Found {}".format(found))
break
except Exception as e:
log.debug("Error getting node list: {}".format(e))
try:
# if nodes are upgrading, delay the retries. Unjoined node statuses include:
# 'joining: started'
# 'joining: almost done'
# 'joining: upgrade the image'
# 'joining: switch to the new image'
unjoined_status = [_['status'] for _ in self._xmlrpc_do(xmlrpc.node.listUnconfiguredNodes) if _['address'] in node_addresses]
if any(['image' in _ for _ in unjoined_status]):
log.debug("Waiting for image upgrade to finish: {}".format(unjoined_status))
start_time = int(time.time())
continue
except Exception as e:
log.debug("Failed to check unconfigured node status: {}".format(e))
# for connectivity problems... we end up waiting a long time for
# timeouts on the xmlrpc connection... so if we are taking too long
# we should bail
duration = int(time.time()) - start_time
taking_too_long = duration > int(retries * 1.5)
if retries == 0 or taking_too_long:
diff = expected - found
raise vFXTConfigurationException("Timed out waiting for {} node(s) to join.".format(diff))
retries -= 1
if retries % 10 == 0:
log.debug("Found {}, expected {}".format(found, expected))
self._log_conditions(xmlrpc=xmlrpc)
self._sleep()
log.info("All nodes have joined the cluster.")
def enable_ha(self, retries=ServiceBase.XMLRPC_RETRIES, xmlrpc=None):
'''Enable HA on the cluster
Arguments:
retries (int, optional): number of retries
xmlrpc (xmlrpcClt, optional): xmlrpc client
Raises: vFXTConfigurationException
'''
log.info("Enabling HA mode")
try:
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
status = self._xmlrpc_do(xmlrpc.cluster.enableHA, _xmlrpc_do_retries=retries)
if status != 'success':
raise vFXTConfigurationException(status)
except Exception as ha_e:
raise vFXTConfigurationException("Failed to enable HA: {}".format(ha_e))
def rebalance_directory_managers(self, retries=ServiceBase.XMLRPC_RETRIES):
'''Call rebalanceDirManagers via XMLRPC
Arguments:
retries (int): number of retries
Raises: vFXTConfigurationException
'''
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
log.info("Rebalancing directory managers")
try:
status = self._xmlrpc_do(xmlrpc.maint.rebalanceDirManagers, _xmlrpc_do_retries=retries)
if status != 'success':
raise vFXTConfigurationException(status)
except xmlrpclib_Fault as e:
# AVERE_EINVAL, not needed or already in progress
if int(e.faultCode) == 103: #pylint: disable=no-member
return
raise vFXTStatusFailure("Waiting for cluster rebalance failed: {}".format(e))
except Exception as e:
raise vFXTStatusFailure("Waiting for cluster rebalance failed: {}".format(e))
def first_node_configuration(self):
'''Basic configuration for the first cluster node
'''
if not self.mgmt_ip:
raise vFXTConfigurationException("Cannot configure a cluster without a management address")
log.info("Waiting for remote API connectivity")
xmlrpc = None
try:
xmlrpc = self.xmlrpc(retries=ServiceBase.WAIT_FOR_INITIAL_CONNECTION) #pylint: disable=unused-variable
except Exception as e:
self.first_node_error = e
raise
self.set_default_proxy(xmlrpc=xmlrpc)
if self.trace_level:
log.info("Setting trace {}".format(self.trace_level))
support_opts = {'rollingTrace': 'yes', 'traceLevel': self.trace_level}
try:
response = self._xmlrpc_do(xmlrpc.support.modify, support_opts)
if response[0] != 'success':
self.first_node_error = vFXTConfigurationException(response)
raise self.first_node_error #pylint: disable=raising-bad-type
except Exception as e:
log.error("Failed to configure trace options: {}".format(e))
if self.timezone:
log.info("Setting timezone to {}".format(self.timezone))
response = self._xmlrpc_do(xmlrpc.cluster.modify, {'timezone': self.timezone})
if response != 'success':
self.first_node_error = vFXTConfigurationException(response)
raise self.first_node_error #pylint: disable=raising-bad-type
# try and enable HA early if we have support in the AvereOS release for single node
try:
try:
self.enable_ha(xmlrpc=xmlrpc)
except Exception as e:
log.debug("Failed to enable early HA, will retry later: {}".format(e))
except Exception as e:
log.debug("Failed during final first node configuration: {}".format(e))
self.first_node_error = vFXTConfigurationException(e)
raise self.first_node_error #pylint: disable=raising-bad-type
def set_default_proxy(self, name=None, xmlrpc=None):
'''Set the default cluster proxy configuration
Arguments:
name (str, optional): proxy name (defaults to proxy hostname)
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
if not self.proxy:
log.debug("Skipping proxy configuration")
return
name = name or self.proxy.hostname
if not name or not self.proxy.geturl():
raise vFXTConfigurationException("Unable to create proxy configuration: Bad proxy host")
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
body = {'url': self.proxy.geturl(), 'user': self.proxy.username or '', 'password': self.proxy.password or ''}
if name not in self._xmlrpc_do(xmlrpc.cluster.listProxyConfigs):
log.info("Setting proxy configuration")
try:
response = self._xmlrpc_do(xmlrpc.cluster.createProxyConfig, name, body)
if response != 'success':
raise vFXTConfigurationException(response)
except Exception as e:
raise vFXTConfigurationException("Unable to create proxy configuration: {}".format(e))
try:
response = self._xmlrpc_do(xmlrpc.cluster.modify, {'proxy': name})
if response != 'success':
raise vFXTConfigurationException(response)
except Exception as e:
raise vFXTConfigurationException("Unable to configure cluster proxy configuration: {}".format(e))
def allow_node_join(self, enable=True, retries=ServiceBase.WAIT_FOR_HEALTH_CHECKS, xmlrpc=None): #pylint: disable=unused-argument
'''Enable created nodes to join
Arguments:
enable (bool, optional): Allow nodes to join
retries (int): number of retries (default 600)
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
def _compat_allow_node_join(enable, xmlrpc):
setting = 'yes' if enable else 'no'
log.debug("_compat_allow_node_join setting allowAllNodesToJoin to {}".format(setting))
response = self._xmlrpc_do(xmlrpc.cluster.modify, {'allowAllNodesToJoin': setting})
if response != 'success':
raise vFXTConfigurationException("Failed to update allow node join configuration: {}".format(response))
if not enable:
_compat_allow_node_join(enable, xmlrpc)
return
# we have to accumulate all of the nodes we expect to see in node.listUnconfiguredNodes
node_addresses = [_.ip() for _ in self.nodes]
node_count = len(node_addresses)
joined_count = len(self._xmlrpc_do(xmlrpc.node.list))
expected_unjoined_count = node_count - joined_count
unjoined = []
if not expected_unjoined_count:
log.debug("Nodes joined on their own")
return
log.info("Waiting for {} nodes to show up and ask to join cluster".format(expected_unjoined_count))
start_time = int(time.time())
op_retries = retries
while True:
unjoined_count = 0
try:
unjoined = [_ for _ in self._xmlrpc_do(xmlrpc.node.listUnconfiguredNodes) if _['address'] in node_addresses]
unjoined_count = len(unjoined)
if unjoined_count == expected_unjoined_count:
break
except Exception as e:
log.debug("Failed to check unconfigured node status: {}".format(e))
try:
if len(self._xmlrpc_do(xmlrpc.node.list)) == node_count:
log.debug("Nodes joined on their own")
return
except Exception as e:
log.debug("Failed to check joined node status: {}".format(e))
# either we run out of retries or we take too long
duration = int(time.time()) - start_time
taking_too_long = duration > int(retries * 1.5)
if op_retries == 0 or taking_too_long:
diff = expected_unjoined_count - unjoined_count
raise vFXTConfigurationException("Timed out waiting for {} node(s) to come up.".format(diff))
if op_retries % 10 == 0:
unjoined_names = ', '.join([_['name'] for _ in unjoined])
log.debug("Found {} ({}), expected {}".format(unjoined_count, unjoined_names, expected_unjoined_count))
self._log_conditions(xmlrpc=xmlrpc)
op_retries -= 1
self._sleep()
# once we have them, call node.allowToJoin with our nodes in one group
node_names = [_['name'] for _ in unjoined]
log.info("Setting allow join for {} nodes".format(expected_unjoined_count))
log.debug(','.join(node_names))
try:
activity = self._xmlrpc_do(xmlrpc.node.allowToJoin, ','.join(node_names), False)
self._xmlrpc_wait_for_activity(activity, 'Failed to allow multiple node joins', retries=retries)
return
except xmlrpclib_Fault as e:
# older releases cannot accept comma delimited node names
if not any([_ in e.faultString for _ in ['Cannot find node', 'Cannot join the node']]):
raise
# try old way
log.info("Setting node join policy")
_compat_allow_node_join(enable, xmlrpc)
def refresh(self):
'''Refresh instance data of cluster nodes from the backend service'''
for n in self.nodes:
n.refresh()
def reload(self):
'''Reload all cluster information'''
if self.is_on(): # reread configuration, uses xmlrpc so must be on
self.load_cluster_information()
else:
self.refresh()
def export(self):
'''Export the cluster object in an easy to serialize format'''
return {
'name': self.name,
'mgmt_ip': self.mgmt_ip,
'admin_password': self.admin_password,
'nodes': [n.instance_id for n in self.nodes]
}
def _sleep(self, duration=None):
'''General sleep handling'''
time.sleep(duration or self.service.POLLTIME)
@classmethod
def valid_cluster_name(cls, name):
'''Validate the cluster name
Returns: bool
'''
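# A valid name starts with a lowercase letter, may contain lowercase letters,
# digits, and hyphens, must end with a letter or digit, and is 1-128
# characters long (e.g. 'avere-cluster1' passes, 'Cluster_1' does not).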
name_len = len(name)
if name_len < 1 or name_len > 128:
return False
if re.search('^[a-z]([-a-z0-9]*[a-z0-9])?$', name):
return True
return False
def in_use_addresses(self, category='all', xmlrpc=None):
'''Get in use addresses from the cluster
Arguments:
category (str): all (default), mgmt, vserver, cluster
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
addresses = set()
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
if category in ['all', 'mgmt']:
addresses.update([self._xmlrpc_do(xmlrpc.cluster.get)['mgmtIP']['IP']])
if category in ['all', 'vserver']:
for vs in self._xmlrpc_do(xmlrpc.vserver.list):
data = self._xmlrpc_do(xmlrpc.vserver.get, vs)
for client_range in data[vs]['clientFacingIPs']:
first = client_range['firstIP']
last = client_range['lastIP']
range_addrs = Cidr.expand_address_range(first, last)
addresses.update(range_addrs)
if category in ['all', 'cluster']:
data = self._xmlrpc_do(xmlrpc.cluster.get)
for cluster_range in data['clusterIPs']:
first = cluster_range['firstIP']
last = cluster_range['lastIP']
range_addrs = Cidr.expand_address_range(first, last)
addresses.update(range_addrs)
return list(addresses)
def set_node_naming_policy(self, xmlrpc=None):
'''Rename nodes internally and set the default node prefix
This sets the node names internally to match the service instance
names. This also sets the node prefix to be the cluster name.
Arguments:
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
if not self.nodes:
log.debug("No nodes to rename, skipping")
return
if not self.node_rename:
log.debug("Skipping node naming configuration")
return
node_ip_map = {ip: n.name() for n in self.nodes for ip in n.in_use_addresses()}
# rename nodes with cluster prefix
log.info("Setting node naming policy")
# first pass, rename new mismatched nodes to their node id
retries = ServiceBase.XMLRPC_RETRIES
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
while True:
try:
node_names = self._xmlrpc_do(xmlrpc.node.list)
nodes = [list(self._xmlrpc_do(xmlrpc.node.get, _).values())[0] for _ in node_names]
for node in nodes:
node_name = node_ip_map.get(node['primaryClusterIP']['IP'], None)
if node_name and node_name != node['name'] and node_name in node_names:
log.debug("Renaming new node {} -> {}".format(node['name'], node['id']))
self._xmlrpc_do(xmlrpc.node.rename, node['name'], node['id'])
break
except Exception as e:
log.debug(e)
if retries == 0:
log.error("Failed to rename nodes: {}".format(e))
break
retries -= 1
# second pass, rename all nodes to their instance names
retries = ServiceBase.XMLRPC_RETRIES
while True:
try:
node_names = self._xmlrpc_do(xmlrpc.node.list)
nodes = [list(self._xmlrpc_do(xmlrpc.node.get, _).values())[0] for _ in node_names]
for node in nodes:
node_name = node_ip_map.get(node['primaryClusterIP']['IP'], None)
if node_name and node_name != node['name'] and node_name not in node_names:
log.debug("Renaming node {} -> {}".format(node['name'], node_name))
self._xmlrpc_do(xmlrpc.node.rename, node['name'], node_name)
break
except Exception as e:
log.debug(e)
if retries == 0:
log.error("Failed to rename nodes: {}".format(e))
break
retries -= 1
def vserver_home_addresses(self, vservers=None, xmlrpc=None):
'''Home the addresses of the vserver across the nodes
Arguments:
vservers (list, optional): list of vservers to home (otherwise all vservers)
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
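# Note: the cycle below assigns the vserver's client-facing addresses to the
# sorted node names round-robin, e.g. (hypothetical) 10.0.0.10-10.0.0.13 over
# ['node1', 'node2'] maps to node1, node2, node1, node2 respectively.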
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
vservers = vservers or xmlrpc.vserver.list()
if not isinstance(vservers, list):
vservers = [vservers]
nodes = itertools.cycle(sorted(xmlrpc.node.list()))
for vserver in vservers:
home_cfg = self._xmlrpc_do(xmlrpc.vserver.listClientIPHomes, vserver)
# if any address already has a home assignment, leave the existing configuration alone
if [_ for _ in home_cfg if _['home'] != 'None']:
log.debug("Refusing to override existing home configuration")
continue
# get the address ranges from our vserver
vserver_data = xmlrpc.vserver.get(vserver)[vserver]
vifs = set()
for address_range in vserver_data['clientFacingIPs']:
vifs.update(Cidr.expand_address_range(address_range['firstIP'], address_range['lastIP']))
# sort numerically
vifs = [Cidr.to_address(_) for _ in sorted([Cidr.from_address(_) for _ in vifs])]
# build mapping table
mappings = {vif: next(nodes) for vif in vifs}
old_mappings = {_['ip']: _['current'] for _ in home_cfg}
if not [_ for _ in list(mappings.keys()) if mappings[_] != old_mappings.get(_)]:
log.debug("Address home configuration is up to date for vserver '{}'".format(vserver))
continue
log.debug("Setting up addresses home configuration for vserver '{}': {}".format(vserver, mappings))
retries = self.service.EXTENDED_XMLRPC_RETRIES
while True:
try:
activity = self._xmlrpc_do(xmlrpc.vserver.modifyClientIPHomes, vserver, mappings)
self._xmlrpc_wait_for_activity(activity, "Failed to rebalance vserver {} addresses".format(vserver))
break
except Exception as e:
log.debug(e)
if retries == 0:
raise
retries -= 1
|
alarm.py
|
# Copyright (c) 2009-2019 Tom Keffer <tkeffer@gmail.com>
# See the file LICENSE.txt for your rights.
"""Example of how to implement an alarm in WeeWX.
*******************************************************************************
To use this alarm, add the following to the weewx configuration file:
[Alarm]
expression = "outTemp < 40.0"
time_wait = 3600
smtp_host = smtp.example.com
smtp_user = myusername
smtp_password = mypassword
from = sally@example.com
mailto = jane@example.com, bob@example.com
subject = "Alarm message from weewx!"
In this example, if the outside temperature falls below 40, it will send an
email to the users specified in the comma separated list specified in option
"mailto", in this case:
jane@example.com, bob@example.com
The example assumes an SMTP email server at smtp.example.com that requires
login. If the SMTP server does not require login, leave out the lines for
smtp_user and smtp_password.
Setting an email "from" is optional. If not supplied, one will be filled in,
but your SMTP server may or may not accept it.
Setting an email "subject" is optional. If not supplied, one will be filled in.
To avoid a flood of emails, one will only be sent every 3600 seconds (one
hour).
*******************************************************************************
To enable this service:
1) Copy this file to the user directory. See https://bit.ly/33YHsqX for where your user
directory is located.
2) Modify the weewx configuration file by adding this service to the option
"report_services", located in section [Engine][[Services]].
[Engine]
[[Services]]
...
report_services = weewx.engine.StdPrint, weewx.engine.StdReport, user.alarm.MyAlarm
*******************************************************************************
If you wish to use both this example and the lowBattery.py example, simply
merge the two configuration options together under [Alarm] and add both
services to report_services.
*******************************************************************************
"""
import smtplib
import socket
import syslog
import threading
import time
from email.mime.text import MIMEText
import weewx
from weeutil.weeutil import timestamp_to_string, option_as_list
from weewx.engine import StdService
# Inherit from the base class StdService:
class MyAlarm(StdService):
"""Service that sends email if an arbitrary expression evaluates true"""
def __init__(self, engine, config_dict):
# Pass the initialization information on to my superclass:
super(MyAlarm, self).__init__(engine, config_dict)
# This will hold the time when the last alarm message went out:
self.last_msg_ts = 0
try:
# Dig the needed options out of the configuration dictionary.
# If a critical option is missing, an exception will be raised and
# the alarm will not be set.
self.expression = config_dict['Alarm']['expression']
self.time_wait = int(config_dict['Alarm'].get('time_wait', 3600))
self.timeout = int(config_dict['Alarm'].get('timeout', 10))
self.smtp_host = config_dict['Alarm']['smtp_host']
self.smtp_user = config_dict['Alarm'].get('smtp_user')
self.smtp_password = config_dict['Alarm'].get('smtp_password')
self.SUBJECT = config_dict['Alarm'].get('subject', "Alarm message from weewx")
self.FROM = config_dict['Alarm'].get('from', 'alarm@example.com')
self.TO = option_as_list(config_dict['Alarm']['mailto'])
syslog.syslog(syslog.LOG_INFO, "alarm: Alarm set for expression: '%s'" % self.expression)
# If we got this far, it's ok to start intercepting events:
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record) # NOTE 1
except KeyError as e:
syslog.syslog(syslog.LOG_INFO, "alarm: No alarm set. Missing parameter: %s" % e)
def new_archive_record(self, event):
"""Gets called on a new archive record event."""
# To avoid a flood of nearly identical emails, this will do
# the check only if we have never sent an email, or if we haven't
# sent one in the last self.time_wait seconds:
if not self.last_msg_ts or abs(time.time() - self.last_msg_ts) >= self.time_wait:
# Get the new archive record:
record = event.record
# Be prepared to catch an exception in the case that the expression contains
# a variable that is not in the record:
try: # NOTE 2
# Evaluate the expression in the context of the event archive record.
# Sound the alarm if it evaluates true:
if eval(self.expression, None, record): # NOTE 3
# Sound the alarm!
# Launch in a separate thread so it doesn't block the main LOOP thread:
t = threading.Thread(target=MyAlarm.sound_the_alarm, args=(self, record))
t.start()
# Record when the message went out:
self.last_msg_ts = time.time()
except NameError as e:
# The record was missing a named variable. Log it.
syslog.syslog(syslog.LOG_DEBUG, "alarm: %s" % e)
def sound_the_alarm(self, rec):
"""Sound the alarm in a 'try' block"""
# Wrap the attempt in a 'try' block so we can log a failure.
try:
self.do_alarm(rec)
except socket.gaierror:
# A gaierror exception is usually caused by an unknown host
syslog.syslog(syslog.LOG_CRIT, "alarm: unknown host %s" % self.smtp_host)
# Reraise the exception. This will cause the thread to exit.
raise
except Exception as e:
syslog.syslog(syslog.LOG_CRIT, "alarm: unable to sound alarm. Reason: %s" % e)
# Reraise the exception. This will cause the thread to exit.
raise
def do_alarm(self, rec):
"""Send an email out"""
# Get the time and convert to a string:
t_str = timestamp_to_string(rec['dateTime'])
# Log the alarm
syslog.syslog(syslog.LOG_INFO, 'alarm: Alarm expression "%s" evaluated True at %s' % (self.expression, t_str))
# Form the message text:
msg_text = 'Alarm expression "%s" evaluated True at %s\nRecord:\n%s' % (self.expression, t_str, str(rec))
# Convert to MIME:
msg = MIMEText(msg_text)
# Fill in MIME headers:
msg['Subject'] = self.SUBJECT
msg['From'] = self.FROM
msg['To'] = ','.join(self.TO)
try:
# First try an implicitly encrypted connection (SMTP over SSL)
s = smtplib.SMTP_SSL(self.smtp_host, timeout=self.timeout)
syslog.syslog(syslog.LOG_DEBUG, "alarm: using SMTP_SSL")
except (AttributeError, socket.timeout, socket.error):
syslog.syslog(syslog.LOG_DEBUG, "alarm: unable to use SMTP_SSL connection.")
# If that doesn't work, try creating an insecure host, then upgrading
s = smtplib.SMTP(self.smtp_host, timeout=self.timeout)
try:
# Be prepared to catch an exception if the server
# does not support encrypted transport.
s.ehlo()
s.starttls()
s.ehlo()
syslog.syslog(syslog.LOG_DEBUG,
"alarm: using SMTP encrypted transport")
except smtplib.SMTPException:
syslog.syslog(syslog.LOG_DEBUG,
"alarm: using SMTP unencrypted transport")
try:
# If a username has been given, assume that login is required for this host:
if self.smtp_user:
s.login(self.smtp_user, self.smtp_password)
syslog.syslog(syslog.LOG_DEBUG, "alarm: logged in with user name %s" % self.smtp_user)
# Send the email:
s.sendmail(msg['From'], self.TO, msg.as_string())
# Log out of the server:
s.quit()
except Exception as e:
syslog.syslog(syslog.LOG_ERR, "alarm: SMTP mailer refused message with error %s" % e)
raise
# Log sending the email:
syslog.syslog(syslog.LOG_INFO, "alarm: email sent to: %s" % self.TO)
if __name__ == '__main__':
"""This section is used to test alarm.py. It uses a record and alarm
expression that are guaranteed to trigger an alert.
You will need a valid weewx.conf configuration file with an [Alarm]
section that has been set up as illustrated at the top of this file."""
from optparse import OptionParser
import weecfg
usage = """Usage: python alarm.py --help
python alarm.py [CONFIG_FILE|--config=CONFIG_FILE]
Arguments:
CONFIG_PATH: Path to weewx.conf """
epilog = """You must be sure the WeeWX modules are in your PYTHONPATH. For example:
PYTHONPATH=/home/weewx/bin python alarm.py --help"""
weewx.debug = 1
# Set defaults for the system logger:
syslog.openlog('alarm.py', syslog.LOG_PID | syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
# Create a command line parser:
parser = OptionParser(usage=usage,
epilog=epilog)
parser.add_option("--config", dest="config_path", metavar="CONFIG_FILE",
help="Use configuration file CONFIG_FILE.")
# Parse the arguments and options
(options, args) = parser.parse_args()
try:
config_path, config_dict = weecfg.read_config(options.config_path, args)
except IOError as e:
exit("Unable to open configuration file: %s" % e)
print("Using configuration file %s" % config_path)
if 'Alarm' not in config_dict:
exit("No [Alarm] section in the configuration file %s" % config_path)
# This is a fake record that we'll use
rec = {'extraTemp1': 1.0,
'outTemp': 38.2,
'dateTime': int(time.time())}
# Use an expression that will evaluate to True by our fake record.
config_dict['Alarm']['expression'] = "outTemp<40.0"
# We need the main WeeWX engine in order to bind to the event, but we don't need
# for it to completely start up. So get rid of all services:
config_dict['Engine']['Services'] = {}
# Now we can instantiate our slim engine...
engine = weewx.engine.StdEngine(config_dict)
# ... and set the alarm using it.
alarm = MyAlarm(engine, config_dict)
# Create a NEW_ARCHIVE_RECORD event
event = weewx.Event(weewx.NEW_ARCHIVE_RECORD, record=rec)
# Use it to trigger the alarm:
alarm.new_archive_record(event)
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_dash.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_dash.bip32 import BIP32Node
from electrum_dash import constants
from electrum_dash.dash_tx import to_varbytes, serialize_extra_payload
from electrum_dash.i18n import _
from electrum_dash.plugin import Device, runs_in_hwd_thread
from electrum_dash.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_dash.keystore import Hardware_KeyStore
from electrum_dash.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
@runs_in_hwd_thread
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Dash Testnet" if constants.net.TESTNET else "Dash"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh',):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh',):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not txout.is_ps_ks and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
if t.version > 2:
tx_type = tx.tx_type
if tx_type:
t.extra_data = to_varbytes(serialize_extra_payload(tx))
t.version |= tx_type << 16
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
pair-thread.py
|
# this example uses a background thread to run the daemon in, also works on Windows
import socket
import threading
from Pyro5.api import expose, Daemon, Proxy
# create our own socket pair (server-client sockets that are already connected)
sock1, sock2 = socket.socketpair()
class Echo(object):
@expose
def echo(self, message):
print("server got message: ", message)
return "thank you"
# create a daemon with some Pyro object running on our custom server socket
daemon = Daemon(connected_socket=sock1)
daemon.register(Echo, "echo")
print("(Pyro daemon running on", daemon.locationStr, ")\n")
daemonthread = threading.Thread(target=daemon.requestLoop)
daemonthread.daemon = True
daemonthread.start()
# create a client running on the client socket
with Proxy("echo", connected_socket=sock2) as p:
reply = p.echo("hello!")
print("client got reply:", reply)
reply = p.echo("hello again!")
print("client got reply:", reply)
with Proxy("echo", connected_socket=sock2) as p:
reply = p.echo("hello2!")
print("client got reply:", reply)
reply = p.echo("hello2 again!")
print("client got reply:", reply)
print("\nThe end.")
|
Utils.py
|
#
# Cython -- Things that don't belong
# anywhere else in particular
#
import os
import sys
import re
import io
import codecs
from contextlib import contextmanager
modification_time = os.path.getmtime
def cached_function(f):
cache = {}
uncomputed = object()
def wrapper(*args):
res = cache.get(args, uncomputed)
if res is uncomputed:
res = cache[args] = f(*args)
return res
wrapper.uncached = f
return wrapper
def cached_method(f):
cache_name = '__%s_cache' % f.__name__
def wrapper(self, *args):
cache = getattr(self, cache_name, None)
if cache is None:
cache = {}
setattr(self, cache_name, cache)
if args in cache:
return cache[args]
res = cache[args] = f(self, *args)
return res
return wrapper
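# Example usage (illustrative only; 'parse' and 'Scanner' are hypothetical):
#
#   @cached_function
#   def parse(path):
#       ...                        # result cached per argument tuple
#
#   class Scanner(object):
#       @cached_method
#       def tokens(self, source):  # cache stored on the instance as '__tokens_cache'
#           ...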
def replace_suffix(path, newsuf):
base, _ = os.path.splitext(path)
return base + newsuf
def open_new_file(path):
if os.path.exists(path):
# Make sure to create a new file here so we can
# safely hard link the output files.
os.unlink(path)
# we use the ISO-8859-1 encoding here because we only write pure
# ASCII strings or (e.g. for file names) byte encoded strings as
# Unicode, so we need a direct mapping from the first 256 Unicode
# characters to a byte sequence, which ISO-8859-1 provides
# note: can't use io.open() in Py2 as we may be writing str objects
return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
# Remove junk contents from an output file after a
# failed compilation.
# Also sets access and modification times back to
# those specified by st (a stat struct).
try:
f = open_new_file(path)
except EnvironmentError:
pass
else:
f.write(
"#error Do not use this file, it is the result of a failed Cython compilation.\n")
f.close()
if st:
os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
ftime = modification_time(path)
return ftime > time
@cached_function
def search_include_directories(dirs, qualified_name, suffix, pos,
include=False, sys_path=False):
# Search the list of include directories for the given
# file name. If a source file position is given, first
# searches the directory containing that file. Returns
# None if not found, but does not report an error.
# The 'include' option will disable package dereferencing.
# If 'sys_path' is True, also search sys.path.
if sys_path:
dirs = dirs + tuple(sys.path)
if pos:
file_desc = pos[0]
from Cython.Compiler.Scanning import FileSourceDescriptor
if not isinstance(file_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
if include:
dirs = (os.path.dirname(file_desc.filename),) + dirs
else:
dirs = (find_root_package_dir(file_desc.filename),) + dirs
dotted_filename = qualified_name
if suffix:
dotted_filename += suffix
if not include:
names = qualified_name.split('.')
package_names = tuple(names[:-1])
module_name = names[-1]
module_filename = module_name + suffix
package_filename = "__init__" + suffix
for dir in dirs:
path = os.path.join(dir, dotted_filename)
if path_exists(path):
return path
if not include:
package_dir = check_package_dir(dir, package_names)
if package_dir is not None:
path = os.path.join(package_dir, module_filename)
if path_exists(path):
return path
path = os.path.join(dir, package_dir, module_name,
package_filename)
if path_exists(path):
return path
return None
@cached_function
def find_root_package_dir(file_path):
dir = os.path.dirname(file_path)
if file_path == dir:
return dir
elif is_package_dir(dir):
return find_root_package_dir(dir)
else:
return dir
@cached_function
def check_package_dir(dir, package_names):
for dirname in package_names:
dir = os.path.join(dir, dirname)
if not is_package_dir(dir):
return None
return dir
@cached_function
def is_package_dir(dir_path):
for filename in ("__init__.py",
"__init__.pyc",
"__init__.pyx",
"__init__.pxd"):
path = os.path.join(dir_path, filename)
if path_exists(path):
return 1
@cached_function
def path_exists(path):
# try on the filesystem first
if os.path.exists(path):
return True
# figure out if a PEP 302 loader is around
try:
loader = __loader__
# XXX the code below assumes a 'zipimport.zipimporter' instance
# XXX should be easy to generalize, but too lazy right now to write it
archive_path = getattr(loader, 'archive', None)
if archive_path:
normpath = os.path.normpath(path)
if normpath.startswith(archive_path):
arcname = normpath[len(archive_path)+1:]
try:
loader.get_data(arcname)
return True
except IOError:
return False
except NameError:
pass
return False
# file name encodings
def decode_filename(filename):
if isinstance(filename, unicode):
return filename
try:
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
filename = filename.decode(filename_encoding)
except UnicodeDecodeError:
pass
return filename
# support for source file encoding detection
_match_file_encoding = re.compile(u"coding[:=]\s*([-\w.]+)").search
def detect_file_encoding(source_filename):
f = open_source_file(source_filename, encoding="UTF-8", error_handling='ignore')
try:
return detect_opened_file_encoding(f)
finally:
f.close()
def detect_opened_file_encoding(f):
# PEPs 263 and 3120
# Most of the time the first two lines fall in the first 250 chars,
# and this bulk read/split is much faster.
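# A PEP 263 declaration looks like '# -*- coding: latin-1 -*-' and must appear
# on the first or second line of the file.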
lines = f.read(250).split(u"\n")
if len(lines) > 1:
m = _match_file_encoding(lines[0])
if m:
return m.group(1)
elif len(lines) > 2:
m = _match_file_encoding(lines[1])
if m:
return m.group(1)
else:
return "UTF-8"
# Fallback to one-char-at-a-time detection.
f.seek(0)
chars = []
for i in range(2):
c = f.read(1)
while c and c != u'\n':
chars.append(c)
c = f.read(1)
encoding = _match_file_encoding(u''.join(chars))
if encoding:
return encoding.group(1)
return "UTF-8"
def skip_bom(f):
"""
Read past a BOM at the beginning of a source file.
This could be added to the scanner, but it's *substantially* easier
to keep it at this level.
"""
if f.read(1) != u'\uFEFF':
f.seek(0)
def open_source_file(source_filename, mode="r",
encoding=None, error_handling=None):
if encoding is None:
# Most of the time the coding is unspecified, so be optimistic that
# it's UTF-8.
f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
encoding = detect_opened_file_encoding(f)
if encoding == "UTF-8" and error_handling == 'ignore':
f.seek(0)
skip_bom(f)
return f
else:
f.close()
if not os.path.exists(source_filename):
try:
loader = __loader__
if source_filename.startswith(loader.archive):
return open_source_from_loader(
loader, source_filename,
encoding, error_handling)
except (NameError, AttributeError):
pass
stream = io.open(source_filename, mode=mode,
encoding=encoding, errors=error_handling)
skip_bom(stream)
return stream
def open_source_from_loader(loader,
source_filename,
encoding=None, error_handling=None):
nrmpath = os.path.normpath(source_filename)
arcname = nrmpath[len(loader.archive)+1:]
data = loader.get_data(arcname)
return io.TextIOWrapper(io.BytesIO(data),
encoding=encoding,
errors=error_handling)
def str_to_number(value):
# note: this expects a string as input that was accepted by the
# parser already
if len(value) < 2:
value = int(value, 0)
elif value[0] == '0':
if value[1] in 'xX':
# hex notation ('0x1AF')
value = int(value[2:], 16)
elif value[1] in 'oO':
# Py3 octal notation ('0o136')
value = int(value[2:], 8)
elif value[1] in 'bB':
# Py3 binary notation ('0b101')
value = int(value[2:], 2)
else:
# Py2 octal notation ('0136')
value = int(value, 8)
else:
value = int(value, 0)
return value
def long_literal(value):
if isinstance(value, basestring):
value = str_to_number(value)
return not -2**31 <= value < 2**31
@cached_function
def get_cython_cache_dir():
"""get the cython cache dir
Priority:
1. CYTHON_CACHE_DIR
2. (OS X): ~/Library/Caches/Cython
(posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
3. ~/.cython
"""
if 'CYTHON_CACHE_DIR' in os.environ:
return os.environ['CYTHON_CACHE_DIR']
parent = None
if os.name == 'posix':
if sys.platform == 'darwin':
parent = os.path.expanduser('~/Library/Caches')
else:
# this could fall back on ~/.cache
parent = os.environ.get('XDG_CACHE_HOME')
if parent and os.path.isdir(parent):
return os.path.join(parent, 'cython')
# last fallback: ~/.cython
return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
pipe_in = t = None
orig_stream = os.dup(stream) # keep copy of original stream
try:
pipe_in, pipe_out = os.pipe()
os.dup2(pipe_out, stream) # replace stream by copy of pipe
try:
os.close(pipe_out) # close original pipe-out stream
data = []
def copy():
try:
while True:
d = os.read(pipe_in, 1000)
if d:
data.append(d)
else:
break
finally:
os.close(pipe_in)
def get_output():
output = b''.join(data)
if encoding:
output = output.decode(encoding)
return output
from threading import Thread
t = Thread(target=copy)
t.daemon = True # just in case
t.start()
yield get_output
finally:
os.dup2(orig_stream, stream) # restore original stream
if t is not None:
t.join()
finally:
os.close(orig_stream)
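# Illustrative use of captured_fd (hypothetical snippet): capture raw writes to fd 2.
#
#   with captured_fd(2, encoding='utf8') as get_stderr:
#       os.write(2, b"low-level stderr output\n")
#   print(get_stderr())   # -> 'low-level stderr output\n'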
def print_bytes(s, end=b'\n', file=sys.stdout, flush=True):
file.flush()
try:
out = file.buffer # Py3
except AttributeError:
out = file # Py2
out.write(s)
if end:
out.write(end)
if flush:
out.flush()
class LazyStr:
def __init__(self, callback):
self.callback = callback
def __str__(self):
return self.callback()
def __repr__(self):
return self.callback()
def __add__(self, right):
return self.callback() + right
def __radd__(self, left):
return left + self.callback()
|
A3C_continuous_action.py
|
"""
Asynchronous Advantage Actor Critic (A3C) with continuous action space, Reinforcement Learning.
The Pendulum example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow r1.3
gym 0.8.0
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 200
MAX_GLOBAL_EP = 2000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * tf.stop_gradient(td)
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return SESS.run(self.A, {self.s: s})[0]
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
# if self.name == 'W_0':
# self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
done = True if ep_t == MAX_EP_STEP - 1 else False
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r+8)/8) # normalize
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
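# Illustrative sketch (added for clarity, not part of the original script): the
# n-step bootstrapped value targets computed inside Worker.work(), written as a
# standalone function. `rewards` is the buffered reward list and `bootstrap_v`
# is V(s') for the state after the last buffered step (0 at a terminal state).
def _example_discounted_targets(rewards, bootstrap_v, gamma=GAMMA):
    v_s_ = bootstrap_v
    targets = []
    for r in reversed(rewards):   # walk the buffer backwards
        v_s_ = r + gamma * v_s_   # v_t = r_t + gamma * v_{t+1}
        targets.append(v_s_)
    targets.reverse()             # restore chronological order
    return targets                # e.g. [1.0, 1.0, 1.0] with bootstrap 0 -> [2.71, 1.9, 1.0]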
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
|
scrapperGreatest.py
|
import requests
from bs4 import BeautifulSoup
import json
import threading
from nltk import tokenize
# Constants
STORY_DOMAIN = 'https://americanliterature.com'
GREAT_STORIES_URL = 'https://americanliterature.com/100-great-short-stories'
STORIES_FOR_CHILDREN = 'https://americanliterature.com/short-stories-for-children'
TEST_STORY_URL = '/author/kate-chopin/short-story/the-story-of-an-hour'
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Completed Stories
stories = {}
greatestStoryTitles = []
childrenStoryTitles = []
# Threads
threads = []
# Gets all of the story links
def getStoryLinks(Url, lookfor, parentElement):
page = requests.get(Url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find_all('a')
return [result.get('href') for result in results if (result.find_parent().name == parentElement and lookfor in result.get('href') and result.get('href').count("/") > 1)]
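# Illustrative note (added for clarity, not part of the original script):
# getStoryLinks() keeps only <a> tags whose direct parent matches `parentElement`,
# whose href contains `lookfor`, and whose href has more than one '/'. For example:
#   getStoryLinks(GREAT_STORIES_URL, "/author/", "span")
#   -> ['/author/kate-chopin/short-story/the-story-of-an-hour', ...]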
def getGreatestStoryInfo(lock, route):
page = requests.get(STORY_DOMAIN+route)
soup = BeautifulSoup(page.content, 'html.parser')
print(bcolors.WARNING + route + bcolors.ENDC)
title = soup.find('cite').get_text()
image = soup.find('figure').find_next('img').get('src')
author = soup.find('h3').find('a').get_text()
pStart = soup.find('hr').find_next('p')
paragraphs = []
while pStart.name != 'hr':
if(pStart.name == 'p' and pStart.get_text() != ""):
paragraphs.append(tokenize.sent_tokenize(pStart.get_text()))
pStart = pStart.find_next()
info = {
"title": title,
"image": image,
"author": author,
"story": paragraphs,
"category": "Greatest",
"link": STORY_DOMAIN+route
}
lock.acquire()
global stories
stories[title] = info
greatestStoryTitles.append(title)
lock.release()
def getChildrenStoryInfo(lock, route):
page = requests.get(STORY_DOMAIN+route)
soup = BeautifulSoup(page.content, 'html.parser')
print(bcolors.WARNING + route + bcolors.ENDC)
title = soup.find('cite').get_text()
image = soup.find('hr').find_next('img').get('src')
author = ''
if('by' in soup.find('h3').get_text()):
author = soup.find('h3').find('a').get_text()
pStart = soup.find('hr').find_next('p')
paragraphs = []
while pStart.name != 'hr':
if((pStart.name == 'p' or pStart.name == 'pre') and pStart.get_text() != ""):
paragraphs.append(tokenize.sent_tokenize(pStart.get_text()))
pStart = pStart.find_next()
info = {
"title": title,
"image": image,
"author": author,
"story": paragraphs,
"category": "Children",
"link": STORY_DOMAIN+route
}
lock.acquire()
global stories
stories[title] = info
childrenStoryTitles.append(title)
lock.release()
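# Illustrative sketch (an assumption, not part of the original script): the threaded
# fan-out that the commented-out code below hints at -- one worker thread per story
# link, all sharing a lock to serialise writes to the global `stories` dictionary.
# `worker` is either getGreatestStoryInfo or getChildrenStoryInfo.
def _example_scrape_threaded(links, worker, lock, max_threads=8):
    pending = list(links)
    while pending:
        batch, pending = pending[:max_threads], pending[max_threads:]
        batch_threads = [threading.Thread(target=worker, args=(lock, route)) for route in batch]
        for th in batch_threads:
            th.start()
        for th in batch_threads:
            th.join()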
ourLock = threading.Lock()
greatestLinks = getStoryLinks(GREAT_STORIES_URL, "/author/", "span")
childrenLinks = getStoryLinks(STORIES_FOR_CHILDREN, "", "figure")
# getChildrenStoryInfo(ourLock, '/childrens-stories/hansel-and-gretel')
# print(stories)
# print(childrenStoryTitles)
for story in greatestLinks:
# th = threading.Thread(target=getGreatestStoryInfo, args=(ourLock, story))
# threads.append(th)
# th.start()
getGreatestStoryInfo(ourLock, story)
# for thread in threads:
# thread.join()
# threads.clear()
for story in childrenLinks:
# th = threading.Thread(target=getChildrenStoryInfo, args=(ourLock, story))
# threads.append(th)
# th.start()
try:
getChildrenStoryInfo(ourLock, story)
    except Exception as exc:
        print(bcolors.FAIL + 'Could not get info for ' + story + ': ' + str(exc) + bcolors.ENDC)
# for thread in threads:
# thread.join()
categories = {
"Greatest": greatestStoryTitles,
"Children": childrenStoryTitles
}
with open('../cache/storyData2.json', 'w') as f:
json.dump(stories, f, indent=4)
with open('../cache/storyTitles.json', 'w') as f2:
json.dump(categories, f2, indent=4)
|
test_time.py
|
from test import support
import decimal
import enum
import locale
import math
import platform
import sys
import sysconfig
import time
import threading
import unittest
try:
import _testcapi
except ImportError:
_testcapi = None
# Max year is only limited by the size of C int.
SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
TIME_MINYEAR = -TIME_MAXYEAR - 1 + 1900
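# Worked example (illustrative): with the common 4-byte C int,
#   TIME_MAXYEAR = 2**31 - 1 = 2147483647
#   TIME_MINYEAR = -2147483647 - 1 + 1900 = -2147481748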
SEC_TO_US = 10 ** 6
US_TO_NS = 10 ** 3
MS_TO_NS = 10 ** 6
SEC_TO_NS = 10 ** 9
NS_TO_SEC = 10 ** 9
class _PyTime(enum.IntEnum):
# Round towards minus infinity (-inf)
ROUND_FLOOR = 0
# Round towards infinity (+inf)
ROUND_CEILING = 1
# Round to nearest with ties going to nearest even integer
ROUND_HALF_EVEN = 2
# Round away from zero
ROUND_UP = 3
# Rounding modes supported by PyTime
ROUNDING_MODES = (
# (PyTime rounding method, decimal rounding method)
(_PyTime.ROUND_FLOOR, decimal.ROUND_FLOOR),
(_PyTime.ROUND_CEILING, decimal.ROUND_CEILING),
(_PyTime.ROUND_HALF_EVEN, decimal.ROUND_HALF_EVEN),
(_PyTime.ROUND_UP, decimal.ROUND_UP),
)
def busy_wait(duration):
deadline = time.monotonic() + duration
while time.monotonic() < deadline:
pass
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_time(self):
time.time()
info = time.get_clock_info('time')
self.assertFalse(info.monotonic)
self.assertTrue(info.adjustable)
def test_time_ns_type(self):
def check_ns(sec, ns):
self.assertIsInstance(ns, int)
sec_ns = int(sec * 1e9)
# tolerate a difference of 50 ms
            self.assertLess((sec_ns - ns), 50 * 10 ** 6, (sec, ns))
check_ns(time.time(),
time.time_ns())
check_ns(time.monotonic(),
time.monotonic_ns())
check_ns(time.perf_counter(),
time.perf_counter_ns())
check_ns(time.process_time(),
time.process_time_ns())
if hasattr(time, 'thread_time'):
check_ns(time.thread_time(),
time.thread_time_ns())
if hasattr(time, 'clock_gettime'):
check_ns(time.clock_gettime(time.CLOCK_REALTIME),
time.clock_gettime_ns(time.CLOCK_REALTIME))
def test_clock(self):
with self.assertWarns(DeprecationWarning):
time.clock()
with self.assertWarns(DeprecationWarning):
info = time.get_clock_info('clock')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_clock_realtime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
self.assertIsInstance(t, float)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
@unittest.skipUnless(hasattr(time, 'CLOCK_MONOTONIC'),
'need time.CLOCK_MONOTONIC')
def test_clock_monotonic(self):
a = time.clock_gettime(time.CLOCK_MONOTONIC)
b = time.clock_gettime(time.CLOCK_MONOTONIC)
self.assertLessEqual(a, b)
@unittest.skipUnless(hasattr(time, 'pthread_getcpuclockid'),
'need time.pthread_getcpuclockid()')
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_pthread_getcpuclockid(self):
clk_id = time.pthread_getcpuclockid(threading.get_ident())
self.assertTrue(type(clk_id) is int)
self.assertNotEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
t1 = time.clock_gettime(clk_id)
t2 = time.clock_gettime(clk_id)
self.assertLessEqual(t1, t2)
@unittest.skipUnless(hasattr(time, 'clock_getres'),
'need time.clock_getres()')
def test_clock_getres(self):
res = time.clock_getres(time.CLOCK_REALTIME)
self.assertGreater(res, 0.0)
self.assertLessEqual(res, 1.0)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime()')
def test_clock_settime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
try:
time.clock_settime(time.CLOCK_REALTIME, t)
except PermissionError:
pass
if hasattr(time, 'CLOCK_MONOTONIC'):
self.assertRaises(OSError,
time.clock_settime, time.CLOCK_MONOTONIC, 0)
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
self.assertRaises(ValueError, time.sleep, -2)
self.assertRaises(ValueError, time.sleep, -1)
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
self.assertRaises(TypeError, time.strftime, b'%S', tt)
# embedded null character
self.assertRaises(ValueError, time.strftime, '%S\0', tt)
def _bounds_checking(self, func):
# Make sure that strftime() checks the bounds of the various parts
# of the time tuple (0 is valid for *all* values).
# The year field is tested by other test cases above
# Check month [1, 12] + zero support
func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_strftime_format_check(self):
# Test that strftime does not crash on invalid format strings
# that may trigger a buffer overread. When not triggered,
# strftime may succeed or raise ValueError depending on
# the platform.
for x in [ '', 'A', '%A', '%AA' ]:
for y in range(0x0, 0x10):
for z in [ '%', 'A%', 'AA%', '%A%', 'A%A%', '%#' ]:
try:
time.strftime(x * y + z)
except ValueError:
pass
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default
# values. No test for daylight savings since strftime() does
# not change output based on its value and no test for year
# because systems vary in their support for year 0.
expected = "2000 01 01 00 00 00 1 001"
with support.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (2000,)+(0,)*8)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
time.strptime('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
time.strptime('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
# Max year is only limited by the size of C int.
for bigyear in TIME_MAXYEAR, TIME_MINYEAR:
asc = time.asctime((bigyear, 6, 1) + (0,) * 6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime,
(TIME_MAXYEAR + 1,) + (0,) * 8)
self.assertRaises(OverflowError, time.asctime,
(TIME_MINYEAR - 1,) + (0,) * 8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
def test_ctime(self):
t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
for year in [-100, 100, 1000, 2000, 2050, 10000]:
try:
testval = time.mktime((year, 1, 10) + (0,)*6)
except (ValueError, OverflowError):
# If mktime fails, ctime will fail too. This may happen
# on some platforms.
pass
else:
self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipUnless(hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
# Issue #11886: Australian Eastern Standard Time (UTC+10) is called
# "EST" (as Eastern Standard Time, UTC-5) instead of "AEST"
# (non-DST timezone), and "EDT" instead of "AEDT" (DST timezone),
# on some operating systems (e.g. FreeBSD), which is wrong. See for
# example this bug:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=93810
            self.assertIn(time.tzname[0], ('AEST', 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] in ('AEDT', 'EDT'), str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(OverflowError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
if sys.platform.startswith('aix') and t == -1:
# Issue #11188, #19748: mktime() returns -1 on error. On Linux,
                # the tm_wday field is used as a sentinel to detect if -1 is
# really an error or a valid timestamp. On AIX, tm_wday is
# unchanged even on success and so cannot be used as a
# sentinel.
continue
try:
tt = time.localtime(t)
except (OverflowError, OSError):
pass
else:
self.assertEqual(time.mktime(tt), t)
# Issue #13309: passing extreme values to mktime() or localtime()
# borks the glibc's internal timezone data.
@unittest.skipUnless(platform.libc_ver()[0] != 'glibc',
"disabled because of a bug in glibc. Issue #13309")
def test_mktime_error(self):
# It may not be possible to reliably make mktime return error
        # on all platforms. This will make sure that no other exception
# than OverflowError is raised for an extreme value.
tt = time.gmtime(self.t)
tzname = time.strftime('%Z', tt)
self.assertNotEqual(tzname, 'LMT')
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
self.assertEqual(time.strftime('%Z', tt), tzname)
def test_monotonic(self):
# monotonic() should not go backward
times = [time.monotonic() for n in range(100)]
t1 = times[0]
for t2 in times[1:]:
self.assertGreaterEqual(t2, t1, "times=%s" % times)
t1 = t2
# monotonic() includes time elapsed during a sleep
t1 = time.monotonic()
time.sleep(0.5)
t2 = time.monotonic()
dt = t2 - t1
self.assertGreater(t2, t1)
# Issue #20101: On some Windows machines, dt may be slightly low
self.assertTrue(0.45 <= dt <= 1.0, dt)
# monotonic() is a monotonic but non adjustable clock
info = time.get_clock_info('monotonic')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_perf_counter(self):
time.perf_counter()
def test_process_time(self):
        # process_time() should not include time spent during a sleep
start = time.process_time()
time.sleep(0.100)
stop = time.process_time()
# use 20 ms because process_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
# bpo-33723: A busy loop of 100 ms should increase process_time()
# by at least 15 ms. Tolerate 15 ms because of the bad resolution of
# the clock on Windows (around 15.6 ms).
min_time = 0.015
busy_time = 0.100
# process_time() should include CPU time spent in any thread
start = time.process_time()
busy_wait(busy_time)
stop = time.process_time()
self.assertGreaterEqual(stop - start, min_time)
t = threading.Thread(target=busy_wait, args=(busy_time,))
start = time.process_time()
t.start()
t.join()
stop = time.process_time()
self.assertGreaterEqual(stop - start, min_time)
info = time.get_clock_info('process_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_thread_time(self):
if not hasattr(time, 'thread_time'):
if sys.platform.startswith(('linux', 'win')):
self.fail("time.thread_time() should be available on %r"
% (sys.platform,))
else:
self.skipTest("need time.thread_time")
        # thread_time() should not include time spent during a sleep
start = time.thread_time()
time.sleep(0.100)
stop = time.thread_time()
# use 20 ms because thread_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
# bpo-33723: A busy loop of 100 ms should increase thread_time()
# by at least 15 ms, but less than 30 ms in other threads.
# Tolerate 15 and 30 ms because of the bad resolution
# of the clock on Windows (around 15.6 ms).
min_time = 0.015
max_time = 0.030
busy_time = 0.100
# thread_time() should include CPU time spent in current thread...
start = time.thread_time()
busy_wait(busy_time)
stop = time.thread_time()
self.assertGreaterEqual(stop - start, min_time)
# ...but not in other threads
t = threading.Thread(target=busy_wait, args=(busy_time,))
start = time.thread_time()
t.start()
t.join()
stop = time.thread_time()
self.assertLess(stop - start, max_time)
info = time.get_clock_info('thread_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime')
def test_monotonic_settime(self):
t1 = time.monotonic()
realtime = time.clock_gettime(time.CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
time.clock_settime(time.CLOCK_REALTIME, realtime - 3600)
except PermissionError as err:
self.skipTest(err)
t2 = time.monotonic()
time.clock_settime(time.CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_localtime_failure(self):
# Issue #13847: check for localtime() failure
invalid_time_t = None
for time_t in (-1, 2**30, 2**33, 2**60):
try:
time.localtime(time_t)
except OverflowError:
self.skipTest("need 64-bit time_t")
except OSError:
invalid_time_t = time_t
break
if invalid_time_t is None:
self.skipTest("unable to find an invalid time_t value")
self.assertRaises(OSError, time.localtime, invalid_time_t)
self.assertRaises(OSError, time.ctime, invalid_time_t)
# Issue #26669: check for localtime() failure
self.assertRaises(ValueError, time.localtime, float("nan"))
self.assertRaises(ValueError, time.ctime, float("nan"))
def test_get_clock_info(self):
clocks = ['clock', 'monotonic', 'perf_counter', 'process_time', 'time']
for name in clocks:
if name == 'clock':
with self.assertWarns(DeprecationWarning):
info = time.get_clock_info('clock')
else:
info = time.get_clock_info(name)
#self.assertIsInstance(info, dict)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0.0 < resolution <= 1.0
self.assertGreater(info.resolution, 0.0)
self.assertLessEqual(info.resolution, 1.0)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, time.get_clock_info, 'xxx')
class TestLocale(unittest.TestCase):
def setUp(self):
self.oldloc = locale.setlocale(locale.LC_ALL)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.oldloc)
def test_bug_3061(self):
try:
tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
except locale.Error:
self.skipTest('could not set locale.LC_ALL to fr_FR')
# This should not cause an exception
time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _TestAsctimeYear:
_format = '%d'
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
# Issue 13305: For years < 1000, the value is not always
# padded to 4 digits across platforms. The C standard
# assumes year >= 1900, so it does not specify the number
# of digits.
if time.strftime('%Y', (1,) + (0,) * 8) == '0001':
_format = '%04d'
else:
_format = '%d'
def yearstr(self, y):
return time.strftime('%Y', (y,) + (0,) * 8)
def test_4dyear(self):
# Check that we can return the zero padded value.
if self._format == '%04d':
self.test_year('%04d')
else:
def year4d(y):
return time.strftime('%4Y', (y,) + (0,) * 8)
self.test_year('%04d', func=year4d)
def skip_if_not_supported(y):
msg = "strftime() is limited to [1; 9999] with Visual Studio"
# Check that it doesn't crash for year > 9999
try:
time.strftime('%Y', (y,) + (0,) * 8)
except ValueError:
cond = False
else:
cond = True
return unittest.skipUnless(cond, msg)
@skip_if_not_supported(10000)
def test_large_year(self):
return super().test_large_year()
@skip_if_not_supported(0)
def test_negative(self):
return super().test_negative()
del skip_if_not_supported
class _Test4dYear:
_format = '%d'
def test_year(self, fmt=None, func=None):
fmt = fmt or self._format
func = func or self.yearstr
self.assertEqual(func(1), fmt % 1)
self.assertEqual(func(68), fmt % 68)
self.assertEqual(func(69), fmt % 69)
self.assertEqual(func(99), fmt % 99)
self.assertEqual(func(999), fmt % 999)
self.assertEqual(func(9999), fmt % 9999)
def test_large_year(self):
self.assertEqual(self.yearstr(12345).lstrip('+'), '12345')
self.assertEqual(self.yearstr(123456789).lstrip('+'), '123456789')
self.assertEqual(self.yearstr(TIME_MAXYEAR).lstrip('+'), str(TIME_MAXYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MAXYEAR + 1)
def test_negative(self):
self.assertEqual(self.yearstr(-1), self._format % -1)
self.assertEqual(self.yearstr(-1234), '-1234')
self.assertEqual(self.yearstr(-123456), '-123456')
self.assertEqual(self.yearstr(-123456789), str(-123456789))
self.assertEqual(self.yearstr(-1234567890), str(-1234567890))
self.assertEqual(self.yearstr(TIME_MINYEAR), str(TIME_MINYEAR))
# Modules/timemodule.c checks for underflow
self.assertRaises(OverflowError, self.yearstr, TIME_MINYEAR - 1)
with self.assertRaises(OverflowError):
self.yearstr(-TIME_MAXYEAR - 1)
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear, unittest.TestCase):
pass
class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear, unittest.TestCase):
pass
class TestPytime(unittest.TestCase):
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_localtime_timezone(self):
# Get the localtime and examine it for the offset and zone.
lt = time.localtime()
self.assertTrue(hasattr(lt, "tm_gmtoff"))
self.assertTrue(hasattr(lt, "tm_zone"))
# See if the offset and zone are similar to the module
# attributes.
if lt.tm_gmtoff is None:
self.assertTrue(not hasattr(time, "timezone"))
else:
self.assertEqual(lt.tm_gmtoff, -[time.timezone, time.altzone][lt.tm_isdst])
if lt.tm_zone is None:
self.assertTrue(not hasattr(time, "tzname"))
else:
self.assertEqual(lt.tm_zone, time.tzname[lt.tm_isdst])
# Try and make UNIX times from the localtime and a 9-tuple
# created from the localtime. Test to see that the times are
# the same.
t = time.mktime(lt); t9 = time.mktime(lt[:9])
self.assertEqual(t, t9)
# Make localtimes from the UNIX times and compare them to
# the original localtime, thus making a round trip.
new_lt = time.localtime(t); new_lt9 = time.localtime(t9)
self.assertEqual(new_lt, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt.tm_zone, lt.tm_zone)
self.assertEqual(new_lt9, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt9.tm_zone, lt.tm_zone)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_strptime_timezone(self):
t = time.strptime("UTC", "%Z")
self.assertEqual(t.tm_zone, 'UTC')
t = time.strptime("+0500", "%z")
self.assertEqual(t.tm_gmtoff, 5 * 3600)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_short_times(self):
import pickle
# Load a short time structure using pickle.
st = b"ctime\nstruct_time\np0\n((I2007\nI8\nI11\nI1\nI24\nI49\nI5\nI223\nI1\ntp1\n(dp2\ntp3\nRp4\n."
lt = pickle.loads(st)
self.assertIs(lt.tm_gmtoff, None)
self.assertIs(lt.tm_zone, None)
@unittest.skipIf(_testcapi is None, 'need the _testcapi module')
class CPyTimeTestCase:
"""
Base class to test the C _PyTime_t API.
"""
OVERFLOW_SECONDS = None
def setUp(self):
from _testcapi import SIZEOF_TIME_T
bits = SIZEOF_TIME_T * 8 - 1
self.time_t_min = -2 ** bits
self.time_t_max = 2 ** bits - 1
def time_t_filter(self, seconds):
return (self.time_t_min <= seconds <= self.time_t_max)
def _rounding_values(self, use_float):
"Build timestamps used to test rounding."
units = [1, US_TO_NS, MS_TO_NS, SEC_TO_NS]
if use_float:
            # picoseconds are only tested for pytime converters that accept floats
units.append(1e-3)
values = (
# small values
1, 2, 5, 7, 123, 456, 1234,
# 10^k - 1
9,
99,
999,
9999,
99999,
999999,
# test half even rounding near 0.5, 1.5, 2.5, 3.5, 4.5
499, 500, 501,
1499, 1500, 1501,
2500,
3500,
4500,
)
ns_timestamps = [0]
for unit in units:
for value in values:
ns = value * unit
ns_timestamps.extend((-ns, ns))
for pow2 in (0, 5, 10, 15, 22, 23, 24, 30, 33):
ns = (2 ** pow2) * SEC_TO_NS
ns_timestamps.extend((
-ns-1, -ns, -ns+1,
ns-1, ns, ns+1
))
for seconds in (_testcapi.INT_MIN, _testcapi.INT_MAX):
ns_timestamps.append(seconds * SEC_TO_NS)
if use_float:
# numbers with an exact representation in IEEE 754 (base 2)
for pow2 in (3, 7, 10, 15):
ns = 2.0 ** (-pow2)
ns_timestamps.extend((-ns, ns))
# seconds close to _PyTime_t type limit
ns = (2 ** 63 // SEC_TO_NS) * SEC_TO_NS
ns_timestamps.extend((-ns, ns))
return ns_timestamps
def _check_rounding(self, pytime_converter, expected_func,
use_float, unit_to_sec, value_filter=None):
def convert_values(ns_timestamps):
if use_float:
unit_to_ns = SEC_TO_NS / float(unit_to_sec)
values = [ns / unit_to_ns for ns in ns_timestamps]
else:
unit_to_ns = SEC_TO_NS // unit_to_sec
values = [ns // unit_to_ns for ns in ns_timestamps]
if value_filter:
values = filter(value_filter, values)
# remove duplicates and sort
return sorted(set(values))
# test rounding
ns_timestamps = self._rounding_values(use_float)
valid_values = convert_values(ns_timestamps)
        for time_rnd, decimal_rnd in ROUNDING_MODES:
with decimal.localcontext() as context:
context.rounding = decimal_rnd
for value in valid_values:
debug_info = {'value': value, 'rounding': decimal_rnd}
try:
result = pytime_converter(value, time_rnd)
expected = expected_func(value)
except Exception as exc:
self.fail("Error on timestamp conversion: %s" % debug_info)
self.assertEqual(result,
expected,
debug_info)
# test overflow
ns = self.OVERFLOW_SECONDS * SEC_TO_NS
ns_timestamps = (-ns, ns)
overflow_values = convert_values(ns_timestamps)
        for time_rnd, _ in ROUNDING_MODES:
for value in overflow_values:
debug_info = {'value': value, 'rounding': time_rnd}
with self.assertRaises(OverflowError, msg=debug_info):
pytime_converter(value, time_rnd)
def check_int_rounding(self, pytime_converter, expected_func,
unit_to_sec=1, value_filter=None):
self._check_rounding(pytime_converter, expected_func,
False, unit_to_sec, value_filter)
def check_float_rounding(self, pytime_converter, expected_func,
unit_to_sec=1, value_filter=None):
self._check_rounding(pytime_converter, expected_func,
True, unit_to_sec, value_filter)
def decimal_round(self, x):
d = decimal.Decimal(x)
d = d.quantize(1)
return int(d)
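# Illustrative sketch (added for clarity, not part of the test suite): how the
# decimal module is used above to emulate a _PyTime_t rounding mode. With
# ROUND_HALF_EVEN, ties go to the nearest even integer: 0.5 -> 0, 1.5 -> 2, 2.5 -> 2.
def _example_half_even(value):
    with decimal.localcontext() as ctx:
        ctx.rounding = decimal.ROUND_HALF_EVEN
        return int(decimal.Decimal(value).quantize(1))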
class TestCPyTime(CPyTimeTestCase, unittest.TestCase):
"""
Test the C _PyTime_t API.
"""
# _PyTime_t is a 64-bit signed integer
OVERFLOW_SECONDS = math.ceil((2**63 + 1) / SEC_TO_NS)
def test_FromSeconds(self):
from _testcapi import PyTime_FromSeconds
# PyTime_FromSeconds() expects a C int, reject values out of range
def c_int_filter(secs):
return (_testcapi.INT_MIN <= secs <= _testcapi.INT_MAX)
self.check_int_rounding(lambda secs, rnd: PyTime_FromSeconds(secs),
lambda secs: secs * SEC_TO_NS,
value_filter=c_int_filter)
# test nan
for time_rnd, _ in ROUNDING_MODES:
with self.assertRaises(TypeError):
PyTime_FromSeconds(float('nan'))
def test_FromSecondsObject(self):
from _testcapi import PyTime_FromSecondsObject
self.check_int_rounding(
PyTime_FromSecondsObject,
lambda secs: secs * SEC_TO_NS)
self.check_float_rounding(
PyTime_FromSecondsObject,
lambda ns: self.decimal_round(ns * SEC_TO_NS))
# test nan
for time_rnd, _ in ROUNDING_MODES:
with self.assertRaises(ValueError):
PyTime_FromSecondsObject(float('nan'), time_rnd)
def test_AsSecondsDouble(self):
from _testcapi import PyTime_AsSecondsDouble
def float_converter(ns):
if abs(ns) % SEC_TO_NS == 0:
return float(ns // SEC_TO_NS)
else:
return float(ns) / SEC_TO_NS
self.check_int_rounding(lambda ns, rnd: PyTime_AsSecondsDouble(ns),
float_converter,
NS_TO_SEC)
# test nan
for time_rnd, _ in ROUNDING_MODES:
with self.assertRaises(TypeError):
PyTime_AsSecondsDouble(float('nan'))
def create_decimal_converter(self, denominator):
denom = decimal.Decimal(denominator)
def converter(value):
d = decimal.Decimal(value) / denom
return self.decimal_round(d)
return converter
def test_AsTimeval(self):
from _testcapi import PyTime_AsTimeval
us_converter = self.create_decimal_converter(US_TO_NS)
def timeval_converter(ns):
us = us_converter(ns)
return divmod(us, SEC_TO_US)
if sys.platform == 'win32':
from _testcapi import LONG_MIN, LONG_MAX
# On Windows, timeval.tv_sec type is a C long
def seconds_filter(secs):
return LONG_MIN <= secs <= LONG_MAX
else:
seconds_filter = self.time_t_filter
self.check_int_rounding(PyTime_AsTimeval,
timeval_converter,
NS_TO_SEC,
value_filter=seconds_filter)
@unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimespec'),
'need _testcapi.PyTime_AsTimespec')
def test_AsTimespec(self):
from _testcapi import PyTime_AsTimespec
def timespec_converter(ns):
return divmod(ns, SEC_TO_NS)
self.check_int_rounding(lambda ns, rnd: PyTime_AsTimespec(ns),
timespec_converter,
NS_TO_SEC,
value_filter=self.time_t_filter)
def test_AsMilliseconds(self):
from _testcapi import PyTime_AsMilliseconds
self.check_int_rounding(PyTime_AsMilliseconds,
self.create_decimal_converter(MS_TO_NS),
NS_TO_SEC)
def test_AsMicroseconds(self):
from _testcapi import PyTime_AsMicroseconds
self.check_int_rounding(PyTime_AsMicroseconds,
self.create_decimal_converter(US_TO_NS),
NS_TO_SEC)
class TestOldPyTime(CPyTimeTestCase, unittest.TestCase):
"""
Test the old C _PyTime_t API: _PyTime_ObjectToXXX() functions.
"""
# time_t is a 32-bit or 64-bit signed integer
OVERFLOW_SECONDS = 2 ** 64
def test_object_to_time_t(self):
from _testcapi import pytime_object_to_time_t
self.check_int_rounding(pytime_object_to_time_t,
lambda secs: secs,
value_filter=self.time_t_filter)
self.check_float_rounding(pytime_object_to_time_t,
self.decimal_round,
value_filter=self.time_t_filter)
def create_converter(self, sec_to_unit):
def converter(secs):
floatpart, intpart = math.modf(secs)
intpart = int(intpart)
floatpart *= sec_to_unit
floatpart = self.decimal_round(floatpart)
if floatpart < 0:
floatpart += sec_to_unit
intpart -= 1
elif floatpart >= sec_to_unit:
floatpart -= sec_to_unit
intpart += 1
return (intpart, floatpart)
return converter
def test_object_to_timeval(self):
from _testcapi import pytime_object_to_timeval
self.check_int_rounding(pytime_object_to_timeval,
lambda secs: (secs, 0),
value_filter=self.time_t_filter)
self.check_float_rounding(pytime_object_to_timeval,
self.create_converter(SEC_TO_US),
value_filter=self.time_t_filter)
# test nan
for time_rnd, _ in ROUNDING_MODES:
with self.assertRaises(ValueError):
pytime_object_to_timeval(float('nan'), time_rnd)
def test_object_to_timespec(self):
from _testcapi import pytime_object_to_timespec
self.check_int_rounding(pytime_object_to_timespec,
lambda secs: (secs, 0),
value_filter=self.time_t_filter)
self.check_float_rounding(pytime_object_to_timespec,
self.create_converter(SEC_TO_NS),
value_filter=self.time_t_filter)
# test nan
for time_rnd, _ in ROUNDING_MODES:
with self.assertRaises(ValueError):
pytime_object_to_timespec(float('nan'), time_rnd)
if __name__ == "__main__":
unittest.main()
|
video_recorder.py
|
import random
import csv
import io
import os
import threading
import time
import logging
import sys
from sys import platform
from datetime import datetime
try:  # depends on the Python version (the Queue module was renamed in Python 3)
from Queue import Queue, Empty
except:
from queue import Queue, Empty
import numpy as np
import cv2
from configs import CONFIGS, CAMERA, SERVER_URL
from datetime import datetime
import subprocess
import json
import math
import requests
from requests import ConnectionError
from glob import glob
import shutil
WEBSERVER=CONFIGS["rap_server"]
SAVE_CSV_URL = "http://"+WEBSERVER+"/resultados/save_csv/"
SAVE_IMAGES_URL = "http://"+WEBSERVER+"/resultados/save_images/"
FFMPEG_BIN="/usr/bin/ffmpeg"
#Anthropometric constant values of the human head.
#Found on wikipedia and on:
# "Head-and-Face Anthropometric Survey of U.S. Respirator Users"
#
#X-Y-Z with X pointing forward and Y on the left.
#The X-Y-Z coordinates used are like the standard
# coordinates of ROS (robotic operative system)
P3D_RIGHT_SIDE = np.float32([-100.0, -77.5, -5.0]) #0
P3D_GONION_RIGHT = np.float32([-110.0, -77.5, -85.0]) #4
P3D_MENTON = np.float32([0.0, 0.0, -122.7]) #8
P3D_GONION_LEFT = np.float32([-110.0, 77.5, -85.0]) #12
P3D_LEFT_SIDE = np.float32([-100.0, 77.5, -5.0]) #16
P3D_FRONTAL_BREADTH_RIGHT = np.float32([-20.0, -56.1, 10.0]) #17
P3D_FRONTAL_BREADTH_LEFT = np.float32([-20.0, 56.1, 10.0]) #26
P3D_SELLION = np.float32([0.0, 0.0, 0.0]) #27
P3D_NOSE = np.float32([21.1, 0.0, -48.0]) #30
P3D_SUB_NOSE = np.float32([5.0, 0.0, -52.0]) #33
P3D_RIGHT_EYE = np.float32([-20.0, -65.5,-5.0]) #36
P3D_RIGHT_TEAR = np.float32([-10.0, -40.5,-5.0]) #39
P3D_LEFT_TEAR = np.float32([-10.0, 40.5,-5.0]) #42
P3D_LEFT_EYE = np.float32([-20.0, 65.5,-5.0]) #45
#P3D_LIP_RIGHT = numpy.float32([-20.0, 65.5,-5.0]) #48
#P3D_LIP_LEFT = numpy.float32([-20.0, 65.5,-5.0]) #54
P3D_STOMION = np.float32([10.0, 0.0, -75.0]) #62
TRACKED_POINTS = (0, 4, 8, 12, 16, 17, 26, 27, 30, 33, 36, 39, 42, 45, 62)
HAND_MID_SPINE_THRESHOLD=100
HAND_DISTANCE_THRESHOLD=80
cam_w = 600
cam_h = 480
c_x = cam_w / 2
c_y = cam_h / 2
f_x = c_x / np.tan(60/2 * np.pi / 180)
f_y = f_x
camera_matrix = np.float32([[f_x, 0.0, c_x],
[0.0, f_y, c_y],
[0.0, 0.0, 1.0] ])
camera_distortion = np.float32([0.0, 0.0, 0.0, 0.0, 0.0])
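# Worked numbers for the pinhole model above (illustrative): with a 600x480 frame
# and the assumed 60-degree horizontal field of view,
#   c_x = 600 / 2 = 300, c_y = 480 / 2 = 240,
#   f_x = c_x / tan(30 deg) = 300 / 0.5774 ~= 519.6 px, and f_y = f_x.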
landmarks_3D = np.float32([P3D_RIGHT_SIDE,
P3D_GONION_RIGHT,
P3D_MENTON,
P3D_GONION_LEFT,
P3D_LEFT_SIDE,
P3D_FRONTAL_BREADTH_RIGHT,
P3D_FRONTAL_BREADTH_LEFT,
P3D_SELLION,
P3D_NOSE,
P3D_SUB_NOSE,
P3D_RIGHT_EYE,
P3D_RIGHT_TEAR,
P3D_LEFT_TEAR,
P3D_LEFT_EYE,
P3D_STOMION])
FPS = CAMERA['framerate']
logger = logging.getLogger("Camera")
logger.setLevel(logging.DEBUG)
class VideoRecorder:
"""
    VideoRecorder that uses OpenCV to read data from a USB camera.
"""
def __init__(self, on_error):
"""
on_error: callback
"""
self.camera = cv2.VideoCapture(0)
if not self.camera.isOpened():
raise IOError("Error al reconocer la camara USB")
self.set_camera_params()
# print self.camera.get(cv2.CAP_PROP_FRAME_WIDTH), self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
#self.channel = grpc.insecure_channel(SERVER_URL)
self.record_channel = None
# self.grpc_stub = FeatureExtractionApi_pb2_grpc.FeatureExtractionStub(channel)
self.recording_stop = True
self.image_queue = Queue()
self.count = 0
self.sent_count = 0
self.grabbing = False
self.on_error = on_error
# Starting Camera
logger.debug("Camera started")
def set_camera_params(self):
self.camera.set(3,600)
self.camera.set(4,480)
def capture_continuous(self, filename):
"""
        Captures frames in a loop and enqueues them in image_queue.
"""
logger.debug("capture continuous")
self.count = 1
self.grabbing = True
self.filename = filename
self.createFolders(filename)
self.csv_file=open(str(filename)+'/result.csv', mode='w')
self.resultfile = csv.writer(self.csv_file, delimiter=',')
self.resultfile.writerow(["frame","looks","positions"])
fourcc = cv2.VideoWriter_fourcc(*'H264')
self.videoFile = cv2.VideoWriter(filename+'/video.avi',fourcc, 5.0, (int(self.camera.get(3)),int(self.camera.get(4))))
self.imgDictionary={}
self.lastBodyPosture="none"
self.lastHeadPosture="none"
while True:
start = time.time()
ret, frame = self.camera.read()
#frame=cv2.flip(frame,0)
#bytesImg= cv2.imencode(".jpg",frame)[1].tostring()
self.image_queue.put(frame)
if self.recording_stop:
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
self.count += 1
time.sleep(max(1./FPS - (time.time() - start), 0))
self.grabbing = False
def generate_videos_iterator(self):
"""
        Iterator: reads frames from the image_queue.
"""
logger.debug("generate video iterator")
self.sent_count = 1
while not self.recording_stop or not self.image_queue.empty() or self.grabbing:
try:
frame= self.image_queue.get(block=True, timeout=1)
self.videoFile.write(frame)
cv2.imwrite(self.filename+"/tempFrames/frame"+str(self.sent_count)+".png",frame)
self.image_queue.task_done()
print ("sent",self.sent_count, "of", self.count, "captured")
self.sent_count += 1
except Empty as ex:
logger.error("No data in image queue")
logger.debug("Done generating images")
self.videoFile.release()
def select_biggest_skeleton(self,keypoints):
        max_id = 0
        max_size = 0
for i in range(0,keypoints.shape[0]):
rhip_y = keypoints[i, 8, 1]
lhip_y = keypoints[i, 11, 1]
neck_y = keypoints[i, 1, 1]
size = 0
if (neck_y != 0 and (rhip_y != 0 or lhip_y != 0)):
size = (rhip_y + lhip_y) / 2 - neck_y
if (size > max_size):
max_size = size
max_id = i
return max_id
def headPosture(self,fk,bodyId):
landmarks_2D = np.zeros((len(TRACKED_POINTS),2), dtype=np.float32)
counter = 0
for point in TRACKED_POINTS:
landmarks_2D[counter] = [fk[bodyId][point][0], fk[bodyId][point][1]]
counter += 1
retval, rvec, tvec = cv2.solvePnP(landmarks_3D,
landmarks_2D,
camera_matrix, camera_distortion)
rmat = cv2.Rodrigues(rvec)[0]
ypr = -180*self.yawpitchrolldecomposition(rmat)/math.pi
ypr[1,0] = ypr[1,0]+90
if ypr[0,0]>75 and ypr[0,0]<105:
if ypr[1,0]>-10 and ypr[1,0]<10:
return "center"
else:
if ypr[1,0]>=10:
return "up"
else:
return "down"
else:
if ypr[0,0]>=105:
return "right"
else:
return "left"
def headPostureSkeleton(self,keypoints,bodyId):
rshoulder_x = keypoints[bodyId][2][0]
lshoulder_x = keypoints[bodyId][5][0]
mhip_y = keypoints[bodyId][8][1]
neck_x = keypoints[bodyId][1][0]
neck_y = keypoints[bodyId][1][1]
nose_x = keypoints[bodyId][0][0]
nose_y = keypoints[bodyId][0][1]
reye_x = keypoints[bodyId][15][0]
reye_y = keypoints[bodyId][15][1]
leye_x = keypoints[bodyId][16][0]
leye_y = keypoints[bodyId][16][1]
rear_x = keypoints[bodyId][17][0]
rear_y = keypoints[bodyId][17][1]
lear_x = keypoints[bodyId][18][0]
lear_y = keypoints[bodyId][18][1]
rdist=reye_x-rear_x
ldist=lear_x-leye_x
difference=ldist-rdist
normalizer= (mhip_y-neck_y)/13
average_ear=(rear_y+lear_y)/2
average_eye=(reye_y+leye_y)/2
distance_eyes=(leye_x-reye_x)
distance_leye_nose=(leye_x-nose_x)
distance_reye_nose=(nose_x-reye_x)
atitude=average_ear-nose_y
print("Entrando a los ifs")
print(rdist,ldist,difference,normalizer,average_ear,nose_y,atitude,average_eye,distance_eyes,distance_leye_nose,distance_reye_nose)
        if rshoulder_x != 0 and lshoulder_x != 0 and lshoulder_x < rshoulder_x:  # person facing away from the camera
return "tv"
if rear_x==0 and abs(difference)>normalizer:
return "left"
if lear_x==0 and abs(difference)>normalizer:
return "right"
if difference>normalizer:
return "right"
else:
if difference<-(normalizer):
return "left"
if atitude>((normalizer/3)):
return "up"
else:
if atitude<-(normalizer/1.2):
return "down"
return "center"
def bodyPosture(self,keypoints, person_index, face_orientation, head_height):
rwrist_y = keypoints[person_index][4][1]
rwrist_x = keypoints[person_index][4][0]
lwrist_y = keypoints[person_index][7][1]
lwrist_x = keypoints[person_index][7][0]
mhip_y = keypoints[person_index][8][1]
lhip_y = keypoints[person_index][11][1]
neck_y = keypoints[person_index][1][1]
nose_y = keypoints[person_index][0][1]
rshoulder_x = keypoints[person_index][2][0]
lshoulder_x = keypoints[person_index][5][0]
        if rshoulder_x != 0 and lshoulder_x != 0 and lshoulder_x < rshoulder_x:  # person facing away from the camera
return "bad"
if mhip_y == 0:
return "NOT_DETECTED"
if lwrist_y == 0:
lwrist_y = rwrist_y
if rwrist_y == 0:
rwrist_y = lwrist_y
if rwrist_y == 0:
return "bad"
hand_distance_threshold = neck_y - nose_y
spinebase = mhip_y
spinemid = ((3*spinebase) + neck_y)/4
#spinemid = spinebase-3*hand_distance_threshold
normalizer = 0
if head_height > 0:
normalizer= head_height
else:
normalizer=HAND_MID_SPINE_THRESHOLD
#if lwrist_y < (spinemid - (HAND_MID_SPINE_THRESHOLD/head_height)) or rwrist_y < (spinemid - (HAND_MID_SPINE_THRESHOLD/head_height)):
if lwrist_y < spinemid or rwrist_y < spinemid:
if rwrist_x != 0 and lwrist_x != 0 and abs(rwrist_x - lwrist_x) < hand_distance_threshold:
return "bad"
return "good"
return "bad"
def writeToRapCsv(self, frame, posture, face):
if posture != "good":
postureValue = 0
else:
postureValue = 1
self.resultfile.writerow([frame, face, postureValue])
def captureFacePoseImages(self, img, actualOrientation, lastOrientation, mode, x, y, width, height):
#Mode 0 Face, Mode 1 Pose
if (mode != 0 and mode != 1):
return
if actualOrientation=="NOT_DETECTED" :
return
if actualOrientation!=lastOrientation:
return
if actualOrientation in self.imgDictionary.keys():
countImage=self.imgDictionary[actualOrientation]
else:
countImage=0
countImage=countImage+1
self.imgDictionary[actualOrientation]=countImage
if (mode == 0 and height>1 and width>1):
            # Conditions the ROI must satisfy: the crop box must lie within the image plane
img=img[int(y):int(y)+int(height),int(x):int(x)+int(width)]
imgheight, imgwidth, channels = img.shape
if imgheight>0 and imgwidth>0:
img=cv2.resize(img, (200, 200))
cv2.imwrite(self.filename+"/"+ actualOrientation+"/img" + str(countImage)+".jpg", img)
    def processVideo(self, videos_iterator):
        # Drain the frame iterator; the actual writing happens inside generate_videos_iterator().
        for frame in videos_iterator:
            pass
def start_recording(self, filename):
"""
        Starts recording. Creates a capture thread and a gRPC channel for sending.
"""
logger.info("Start recording")
try:
#self.record_channel = grpc.insecure_channel(SERVER_URL)
#if not self.ping():
# raise
#self.grpc_stub = FeatureExtractionApi_pb2_grpc.FeatureExtractionStub(self.record_channel)
threading.Thread(target=self.capture_continuous, args=(filename, )).start()
videos_iterator = self.generate_videos_iterator()
            worker = threading.Thread(target=self.processVideo, args=(videos_iterator,))
worker.setDaemon(True)
worker.start()
#logger.debug(response)
#self.record_channel.close()
self.record_channel = None
except Exception as e:
logger.exception("start_recording")
logger.error("Murio grpc")
self.on_error()
def record(self):
"""
        Creates a thread that starts the recording.
"""
filename=CONFIGS["session"]
self.recording_stop = False
self.image_queue = Queue()
threading.Thread(target=self.start_recording, args=(filename, )).start()
def stop_record(self, callback=None):
"""
        Stops the video recording.
        callback: executed once all frames have been sent to the gRPC server.
"""
self.recording_stop = True
time.sleep(5)
self.image_queue.join()
self.channel = None
CONFIGS["session"] = '0'
if callback:
callback()
#subprocess.call(["/home/nvidia/openpose/build/examples/openpose/openpose.bin", "--write_json", self.filename+"/output/", "--display", "0", "--render_pose", "0","--face","--image_dir", self.filename+"/tempFrames/", "-net_resolution","128x96" ])
subprocess.call(["/home/nvidia/openpose/build/examples/openpose/openpose.bin", "--write_json", self.filename+"/output/", "--display", "0", "--image_dir", self.filename+"/tempFrames/", "--write_video", self.filename+"/result.avi","--write_video_fps", "5" ])
#subprocess.call(["/home/nvidia/openpose/build/examples/openpose/openpose.bin", "--write_json", self.filename+"/output/", "--display", "0", "--render_pose", "0","--image_dir", self.filename+"/tempFrames/" ])
self.featureExtraction()
self.sendData()
def getHeadRectangle(self,keypoints,bodyId):
nose_x = keypoints[bodyId][0][0]
nose_y = keypoints[bodyId][0][1]
mhip_y = keypoints[bodyId][8][1]
normalizer=(mhip_y-nose_y)/4
if nose_y==0:
return 0,0,0,0
else:
x=nose_x-normalizer
y=nose_y-normalizer
width=normalizer*2
height=normalizer*2
return x,y,width,height
def featureExtraction(self):
for i in range(1,self.count):
f=open(self.filename+"/output/frame"+str(i)+"_keypoints.json")
frame=cv2.imread(self.filename+"/tempFrames/frame"+str(i)+".png")
data = json.load(f)
f.close()
people=data["people"]
posture=[]
#face=[]
for person in people:
posture.append(person["pose_keypoints_2d"])
#face.append(person["face_keypoints_2d"])
keypoints=self.convertToArray(posture)
#fk=self.convertToArray(face)
if not(posture is None) and keypoints.shape[0]>0:
bodyId=self.select_biggest_skeleton(keypoints)
#headx,heady,headw,headh=cv2.boundingRect(np.array(fk[bodyId], dtype=np.int32))
headx,heady,headw,headh=self.getHeadRectangle(keypoints,bodyId)
#(x,y),radius = cv2.minEnclosingCircle(np.array(fk[bodyId], dtype=np.int32))
#print(bodyId,radius)
head_height=headh
#if (len(rectangles) > 0 and rectangles[bodyId].y>0):
if(True):
#hp=self.headPosture(fk,bodyId)
hp=self.headPostureSkeleton(keypoints,bodyId)
bp=self.bodyPosture(keypoints,bodyId,hp,head_height)
self.writeToRapCsv(i, bp, hp)
self.captureFacePoseImages(frame, hp, self.lastHeadPosture, 0, headx, heady, headw, headh)
self.captureFacePoseImages(frame, bp, self.lastBodyPosture, 1, headx, heady, headw, headh)
self.lastHeadPosture=hp
self.lastBodyPosture=bp
print(i,hp,bp)
def get_progress(self):
try:
value=int(self.sent_count * 100.0 / self.count)
if value>100:
value=100
return "{} %".format(value)
except:
return "0 %"
# return "{}/{}".format(self.sent_count, self.count)
def clean(self):
self.camera.release()
logger.debug("Camera released")
# self.camera.close()
def convert_to_mp4(self):
filename_mp4 = self.filename.split(".")[0]+".mp4"
logger.info("file .h264 saved.. Transforming to mp4...")
os.system("MP4Box -fps 30 -add "+ self.filename + " " + filename_mp4)
logger.info("File converted to mp4")
def createFolders(self,path):
try:
shutil.rmtree(path)
except:
print("Cannot delete")
try:
os.mkdir(path)
os.mkdir(path + "/center")
os.mkdir(path + "/up")
os.mkdir(path + "/down")
os.mkdir(path + "/right")
os.mkdir(path + "/left")
os.mkdir(path + "/tv")
os.mkdir(path + "/good")
os.mkdir(path + "/bad")
os.mkdir(path + "/tempFrames")
except:
print("Directories already created")
def sendData(self):
self.csv_file.close()
self.selectRandomImages(3)
self.send_results()
self.send_images()
def selectRandomImages(self, maxImages):
path = self.filename
for pose in self.imgDictionary.keys():
value = self.imgDictionary[pose]
if value>maxImages:
randomValues=random.sample(range(1, value+1), maxImages)
else:
randomValues=range(1,value+1)
for number in randomValues:
img = cv2.imread(path+"/"+pose+"/img"+ str(number) + ".jpg")
cv2.imwrite(path+ "/img" +str(number) +"_"+pose+ ".jpg", img)
def send_results(self):
try:
id = int(self.filename)
except:
print("student ID error")
return
csv_path = os.path.join(self.filename+"/result.csv")
        csv_file = open(csv_path, "rb")
#print csv_path
values = {"resultado_id":id}
files = {"csvfile":csv_file}
try:
response = requests.post(SAVE_CSV_URL, data=values, files=files)
#print "Send results:",response.status_code
return response.status_code
except ConnectionError as e:
print(e)
#print "---------Error al conectarse con el servidor--------------- "
#print "Sent results:",400
return 400
def send_images(self):
try:
id = int(self.filename)
except:
print("Student Id error")
return
imagesPath = self.filename
values = {"resultado_id":id}
files = {}
image_type = "img_type_"
classifier = "classifier_"
filename = "filename_"
fileString = "img_"
count = 0
for actualfile in glob(imagesPath+"/*.jpg"):
files["{}{:d}".format(fileString,count)] = open(actualfile, "rb")
actualfile = actualfile.split('/')
actualFileName = actualfile[-1]
actual_classifier = actualFileName.split('_')[-1].split('.')[0]
if(actual_classifier=="good" or actual_classifier=="bad" ):
values["{}{:d}".format(image_type,count)] = "p"
else:
values["{}{:d}".format(image_type,count)] = "m"
values["{}{:d}".format(classifier,count)] = actual_classifier
values["{}{:d}".format(filename,count)] = actualFileName
print(actualFileName)
count += 1
values["num_images"] = count
try:
response = requests.post(SAVE_IMAGES_URL, data=values, files=files)
print("Sent images:",response.status_code)
except ConnectionError as e:
#print "---------Error al conectarse con el servidor--------------- "
#print "Sent images:",400
print(e)
return 400
for key,value in files.items(): value.close()
#for actualfile in glob(imagesPath+"/*.jpg"):os.remove(actualfile)
#print os.path.join(imagesPath,"video.avi")
        # Convert the video to mp4 and send it
command = [FFMPEG_BIN,'-i',os.path.join(imagesPath,"result.avi"),os.path.join(imagesPath,"video.mp4")]
print(command)
FNULL = open(os.devnull, 'w')
print ("Converting to mp4..",)
join_process = subprocess.Popen(command, stdout=FNULL, stderr=subprocess.STDOUT, bufsize=10**8)
join_process.communicate()
FNULL.close()
print("Done")
subprocess.call(["./sendVideo", os.path.join(imagesPath,"video.mp4"),"root@"+WEBSERVER+":/home/rap/RAP/rap_v2/static/resultados/"+str(id)+"/video/video_complete.mp4"])
response2 = requests.get("http://"+WEBSERVER+"/resultados/process_video/?resultado_id="+str(id))
print ("Process media: ", response2.status_code)
#print video_status
#os.remove(os.path.join(imagesPath,"video.mp4"))
return response.status_code
def ping(self):
"""
        Checks that there is a connection to the grpc server.
        """
        # The grpc connectivity check below is currently bypassed.
        return True
if self.channel is None:
self.channel = grpc.insecure_channel(SERVER_URL)
try:
grpc.channel_ready_future(self.channel).result(timeout=1)
logger.info("Ping")
return True
except grpc.FutureTimeoutError as e:
logger.error("Couldnt connect to GRPC SERVER")
self.channel.close()
self.channel = None
return False
def yawpitchrolldecomposition(self,R):
sin_x = math.sqrt(R[2,0] * R[2,0] + R[2,1] * R[2,1])
        singular = sin_x < 1e-6  # near gimbal lock when sin_x is close to zero
        if not singular:
            z1 = math.atan2(R[2,0], R[2,1])     # around z1-axis
            x = math.atan2(sin_x, R[2,2])       # around x-axis
            z2 = math.atan2(R[0,2], -R[1,2])    # around z2-axis
        else:                                   # gimbal lock
            z1 = 0                              # around z1-axis
            x = math.atan2(sin_x, R[2,2])       # around x-axis
            z2 = 0                              # around z2-axis
return np.array([[z1], [x], [z2]])
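    # Worked example (sanity check): for the identity rotation R = np.eye(3),
    # sin_x = sqrt(0 + 0) = 0, so the gimbal-lock branch runs and the method
    # returns [[0], [0], [0]], i.e. all angles zero, as expected for no rotation.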
def convertToArray(self,klist):
personList=[]
for person in klist:
pointList=[]
            for i in range(0, len(person)-2, 3):
pointList.append([person[i],person[i+1]])
personList.append(pointList)
return np.asarray(personList)
if __name__ == "__main__":
vid_recorder = VideoRecorder()
print ("Set vid recorder")
# vid_recorder.camera.wait_recording(5)
time.sleep(2)
start = datetime.now()
print("Start" , start)
print(start)
vid_recorder.record()
# vid_recorder.camera.wait_recording(2)
# vid_recorder.camera.capture("foo.jpg", use_video_port=True)
# print ("Pic taken")
# vid_recorder.camera.wait_recording(30)
time.sleep(30)
vid_recorder.stop_record()
vid_recorder.clean()
|
calculator.py
|
#!/usr/bin/env python3
import sys
from multiprocessing import Queue,Process,Lock
from datetime import datetime
import getopt
import configparser
class Config(object):
def __init__(self,filename,arg='DEFAULT'):
self._filename = filename
self._arg = arg
self._obj = configparser.ConfigParser(strict=False)
self._obj.read(self._filename)
@property
def basel(self):
return self._obj.getfloat(self._arg,'JiShuL')
@property
def baseh(self):
return self._obj.getfloat(self._arg,'JiShuH')
@property
def soinsurp(self):
sum = 0
for i in ['YangLao','GongJiJin','ShengYu','GongShang','ShiYe','YiLiao']:
sum += self._obj.getfloat(self._arg,i)
return sum
class UserData(object):
def __init__(self,userdatafile):
self._userdatafile = userdatafile
@property
def userdata(self):
userdata = {}
with open(self._userdatafile) as file:
for line in file:
s = line.split(',')
fkey = s[0].strip()
fvalue = s[1].strip()
userdata[fkey] = float(fvalue)
return userdata
class Salary(object):
    # bftax is the salary before personal income tax
    # soinsurp is the social insurance percentage
    # basel is the lowest contribution base
    # baseh is the highest contribution base
def __init__(self,bftax,soinsurp,basel,baseh):
self._bftax = bftax
self._soinsurp = soinsurp
self._basel = basel
self._baseh = baseh
@property
def soinsur(self):
if self._bftax <= self._basel:
return self._basel * self._soinsurp
elif self._bftax >= self._baseh:
return self._baseh * self._soinsurp
else:
return self._bftax * self._soinsurp
@property
def pitax(self):
taxbase = self._bftax - self.soinsur - 3500
if taxbase <= 0:
return 0
elif taxbase > 0 and taxbase <= 1500:
return taxbase * 0.03
elif taxbase > 1500 and taxbase <= 4500:
return (taxbase * 0.1 - 105)
elif taxbase > 4500 and taxbase <= 9000:
return (taxbase * 0.2 - 555)
elif taxbase > 9000 and taxbase <= 35000:
return (taxbase * 0.25 - 1005)
elif taxbase > 35000 and taxbase <= 55000:
return (taxbase * 0.3 - 2755)
elif taxbase > 55000 and taxbase <= 80000:
return (taxbase * 0.35 - 5505)
else:
return (taxbase * 0.45 - 13505)
@property
def aftax(self):
return self._bftax - self.soinsur - self.pitax
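    # Worked example (hypothetical figures): with bftax=10000, soinsurp=0.17,
    # basel=2300 and baseh=17817, soinsur = 10000 * 0.17 = 1700,
    # taxbase = 10000 - 1700 - 3500 = 4800, pitax = 4800 * 0.2 - 555 = 405,
    # so aftax = 10000 - 1700 - 405 = 7895.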
que1 = Queue()
que2 = Queue()
def putda_func(arg,lock):
    # Read the user data file and push (name, before-tax salary) pairs onto que1
user_inst = UserData(arg)
g = [ (k,v) for k,v in\
user_inst.userdata.items()]
for i in g:
with lock:
que1.put(i)
def comp_func(soinsurp,basel,baseh,lock):
while True:
i = que1.get()
bftax = i[1]
salary = Salary(bftax,soinsurp,basel,baseh)
sal_list = [i[0],i[1],salary.soinsur,salary.pitax,\
salary.aftax]
with lock:
que2.put(sal_list)
if que1.empty():
break
def outfi_func(arg):
while True:
lis = que2.get()
with open(arg,'a') as file:
file.write(lis[0])
for i in lis[1:]:
file.write(','+'{:.2f}'.format(i))
t = datetime.now()
t_str = datetime.strftime(t,'%Y-%m-%d %H:%M:%S')
file.write(',' + t_str)
file.write('\n')
if que2.empty():
break
def usage():
line ='Usage: ' + sys.argv[0] + ' -C cityname -c configfile -d userdata -o resultdata'
print(line)
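# Example invocation (hypothetical file names):
#   ./calculator.py -C shenzhen -c test.cfg -d user.csv -o gongzi.csv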
def main():
try:
opts,args = getopt.getopt(sys.argv[1:],'ho:d:C:c:',['help',])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
cityname = 'DEFAULT'
userfile = None
configfile = None
outfile = None
try:
for o,a in opts:
if o in ('-h','--help'):
usage()
sys.exit()
if o == '-o':
outfile = a
elif o == '-C':
cityname = a
elif o == '-d':
userfile = a
elif o == '-c':
configfile = a
else:
raise NameError
config = Config(configfile,cityname.upper())
lo1 = Lock()
lo2 = Lock()
Process(target=putda_func,args=(userfile,lo1)).start()
Process(target=comp_func, args=(config.soinsurp,\
config.basel,config.baseh,lo2)).start()
Process(target=outfi_func, args=(outfile,)).start()
except NameError as err:
usage()
print(err)
sys.exit(2)
if __name__ == '__main__':
main()
|
master.py
|
#!/usr/bin/env python
try:
from gpiozero import DigitalOutputDevice
from gpiozero import DigitalInputDevice, Button
RPI_OK = True
except:
RPI_OK = False
pass
import webapp
from signal import pause
EXPLODE_TIME = 18
EXPLODE_TRACK = 3
DIM1_INTRO = 50
DIM2_INTRO = 40
DIM1_TRAIN = 35
DIM2_TRAIN = 60
DIM1_NORMAL = 60
DIM2_NORMAL = 40
import argparse
import logging
import threading
import bisect
import time
import sys
import os
import rs485
from bus import Bus
import node
class Snake:
PIN_SNAKE_EN = 2
PIN_EASTER_EN = 3
PIN_SNAKE_DONE = 25
def __init__(self):
self.snakeOnPin = DigitalOutputDevice(self.PIN_SNAKE_EN, active_high = False)
self.snakeDonePin = DigitalInputDevice(self.PIN_SNAKE_DONE, pull_up = False)
self.snakeDonePin.when_activated = self.onDone
self.done = False
self.enabled = False
return
def onDone(self):
self.done = True
self.snakeOnPin.off()
def isAlive(self):
return True
def getDone(self, newValue = None):
if (newValue != None):
self.done = newValue
if self.snakeDonePin.is_active:
self.done = True
return self.done
def getEnabled(self, newValue = None):
if (newValue != None):
self.enabled = newValue
if self.enabled and not self.done:
self.snakeOnPin.on()
else:
self.snakeOnPin.off()
return self.enabled
def update(self):
self.getDone()
return
def getAllValues(self):
values = {}
values['done'] = self.done
values['alive'] = self.isAlive()
return values
class RPi:
PIN_NODE_RST = 27
PIN_START_BTN = 23
PIN_EXIT_EN = 24
def __init__(self, onStart = None, onStop = None):
if not RPI_OK: return
self.rstPin = DigitalOutputDevice(self.PIN_NODE_RST)
#self.btnPin = DigitalInputDevice(self.PIN_START_BTN)
self.btnPin = Button(self.PIN_START_BTN)
self.exitPin = DigitalOutputDevice(self.PIN_EXIT_EN, active_high = True)
#self.outPin.source = self.btnPin.values
if onStart: self.btnPin.when_pressed = onStart
if onStop: self.btnPin.when_released = onStop
return
def resetNetwork(self):
if not RPI_OK: return
self.rstPin.blink(n = 1, on_time = 0.1, off_time = 0.1, background = True)
return
def openDoors(self, on):
if not RPI_OK: return
if on: self.exitPin.on()
else: self.exitPin.off()
return
def gameEnabled(self):
if not RPI_OK: return False
return self.btnPin.is_active
class Master:
def __init__(self, bus, script = None):
self.bus = bus
#self.rpi = RPi()
self.rpi = RPi(self.onGameStart, self.onGamePause)
try:
self.bomb = node.Bomb(bus)
self.player = node.Player(bus)
self.dimmer = node.Dimmer(bus)
self.snake = Snake()
self.nodeMap = {
'BOMB' : self.bomb,
'VALVE' : node.Valve(bus),
'FLOOR' : node.Floor(bus),
'RFID' : node.RFID(bus),
'KEY' : node.Key(bus),
'PBX_Task1' : node.PBX_Task1(bus),
'PBX_Task2' : node.PBX_Task2(bus),
'P2K' : node.P2K(bus),
'MAP' : node.Map(bus),
'WC' : node.WC(bus),
'SNAKE' : self.snake
}
except Exception as e:
logging.warning("Failed to instantiate node objects (%s)" % str(e))
self.minutes = 60
self.seconds = 0
self.script = script
self.timeTable = []
try:
self.readScript()
logging.info("Loaded %d entries in script" % (len(self.timeTable)))
except Exception as e:
logging.warning("Exception while reading script (%s)" % str(e))
self.gameState = 'service'
self.rpi.openDoors(False)
#self.setGameState('service')
def setDone(self, address, isDone):
if address in self.nodeMap:
if isDone == False:
logging.info("Resetting %s" % address)
elif isDone == True:
logging.info("Finishing %s" % address)
self.nodeMap[address].getDone(isDone)
def setTime(self, minutes, seconds):
logging.info("Setting time to %02d:%02d" % (minutes, seconds) )
self.bomb.setTime( minutes, seconds )
def getStatus(self):
response = {}
response['status'] = self.gameState
response['gameEnabled'] = self.rpi.gameEnabled()
response['doorsOpen'] = False
for name in self.nodeMap:
values = self.nodeMap[name].getAllValues()
response[name] = values
return response
    def getTime(self):
        # Elapsed game time in minutes (the bomb counts down from 60:00)
        return 60 - (self.minutes + self.seconds / 60.0)
def onGameStart(self):
self.setGameState("active")
def onGamePause(self):
if self.gameState != "service":
self.setGameState("pause")
pass
def setGameState(self, newState):
if newState == 'service':
self.rpi.resetNetwork()
time.sleep(3.5)
self.rpi.openDoors(False)
self.snake.getDone(False)
self.minutes = 60
self.seconds = 0
self.setTime(self.minutes, self.seconds)
#self.player.setTrack1(0)
#self.player.setTrack2(0)
#self.player.setTrack3(0)
#self.bomb.getDone(False)
#self.dimmer.getDone(False)
#self.dimmer.setDimmer1(100)
#self.dimmer.setDimmer2(100)
elif newState == 'active':
if self.gameState != 'pause':
return
self.dimmer.setDimmer1(self.lastDimmer1)
self.dimmer.setDimmer2(self.lastDimmer2)
self.bomb.setEnabled(True)
pass
elif newState == 'pause':
if self.gameState == 'active':
self.lastDimmer1 = self.dimmer.getDimmer1()
self.lastDimmer2 = self.dimmer.getDimmer2()
self.bomb.setEnabled(False)
self.dimmer.setDimmer1(100)
self.dimmer.setDimmer2(100)
else:
self.lastDimmer1 = self.dimmer.setDimmer1(DIM1_INTRO)
self.lastDimmer2 = self.dimmer.setDimmer2(DIM2_INTRO)
self.dimmer.setDimmer3(0)
self.dimmer.setDimmer4(0)
pass
elif newState == 'complete':
if self.gameState == 'complete':
return
self.rpi.openDoors(True)
self.dimmer.setDimmer1(100)
self.dimmer.setDimmer2(100)
self.player.setTrack2(6)
logging.info("Entering game state \"%s\"" % newState)
self.gameState = newState
def ledsOn(self):
self.dimmer.setDimmer4(30)
self.dimmer.setDimmer1(DIM1_TRAIN)
self.dimmer.setDimmer2(DIM2_TRAIN)
def ledsOff(self):
self.dimmer.setDimmer4(0)
self.dimmer.setDimmer1(DIM1_NORMAL)
self.dimmer.setDimmer2(DIM2_NORMAL)
def timeSyncer(self):
logging.info("Status/time updater thread started")
ledState = [False] * 10
ledList = ['VALVE', 'FLOOR', 'RFID', 'KEY', 'MAP', 'P2K', 'WC', 'PBX_Task1', 'PBX_Task2', 'SNAKE']
while True:
logging.debug("Updating nodes")
for name in self.nodeMap:
try:
self.nodeMap[name].update()
except Exception as e:
logging.warning("Failed to update %s (%s)" % (name, str(e)))
try:
idx = 0
isNew = False
isComplete = True
for name in ledList:
if ledState[idx] == False and self.nodeMap[name].done == True:
# new puzzle has been solved
isNew = True
ledState[idx] = self.nodeMap[name].done
if not ledState[idx]: isComplete = False
idx += 1
if isComplete:
self.setGameState("complete")
if isNew:
self.player.triggerTrack2(4)
self.bomb.setLeds(ledState)
except Exception as e:
logging.warning("Failed to update Bomb LED state (%s)" % str(e))
logging.debug("Syncing time")
try:
t = self.bomb.getTime()
if t != None:
                    (minutes, seconds) = t
                    logging.debug("Time sync %02d:%02d (was %02d:%02d)" % (minutes, seconds, self.minutes, self.seconds))
                    self.minutes = minutes
                    self.seconds = seconds
except Exception as e:
logging.warning("Failed to get time (%s)" % str(e))
time.sleep(3)
return
def timeTicker(self):
logging.info("Time ticker thread started")
while True:
if self.gameState == 'active' and self.bomb.enabled:
if self.seconds == 0:
logging.info("%d minutes remaining" % self.minutes)
if self.minutes > 0:
self.minutes -= 1
self.seconds = 59
else:
self.seconds -= 1
time.sleep(1)
return
def darkness(self):
self.dimmer.setDimmer1(0)
self.dimmer.setDimmer2(0)
self.dimmer.setDimmer4(0)
def explode(self):
self.bus.getParameter('ALL', 3, bytearray( (EXPLODE_TIME, EXPLODE_TRACK) ))
def scriptThread(self):
logging.info("Scheduler thread started")
actions = {
'Station' : lambda: self.player.setTrack1(1),
'Train' : lambda: self.player.setTrack1(2),
'Tick' : lambda: self.player.setTrack1(5),
'Darkness' : lambda: self.darkness(),
'Explode' : lambda: self.explode(),
'Laugh' : lambda: self.player.setTrack2(2),
'Announce' : lambda: self.player.setTrack2(1),
'Radio' : lambda: self.player.setTrack3(1),
'SnakeOn' : lambda: self.snake.getEnabled(True),
'SnakeOff' : lambda: self.snake.getEnabled(False),
'LedsOn' : lambda: self.ledsOn(),
'LedsOff' : lambda: self.ledsOff(),
#'StartGame' : lambda: self.setGameState('play')
}
timeMap = dict()
for (timeStamp, action) in self.timeTable:
if not timeStamp in timeMap:
timeMap[timeStamp] = list()
timeMap[timeStamp].append( action )
timeKeys = sorted(timeMap.keys(), reverse=False)
#logging.debug("Time cues: %s" % str(timeKeys))
#try:
# self.setGameState('play')
#except Exception as e:
# logging.error("Unable to start the game (%s)" % str(e))
idx = 0
lastDimmer1 = None
lastDimmer2 = None
while True:
if self.gameState == 'service':
idx = 0
time.sleep(1)
continue
if self.gameState != 'active':
time.sleep(1)
continue
if idx >= len(timeKeys):
logging.info("Script execution complete")
time.sleep(60)
continue
timeElapsed = self.getTime()
if timeKeys[idx] <= timeElapsed:
for action in timeMap[timeKeys[idx]]:
logging.info("New action: %s (time %02d:%02d)" % (action, self.minutes, self.seconds))
try:
actions[action]()
except Exception as e:
logging.warning("Failed to execute action %s (%s)" % (action, str(e)))
idx += 1
time.sleep(1)
return
def readScript(self):
while self.script:
line = self.script.readline()
if not line:
break
line = line.strip()
if not line or line[0] == '#':
continue
fields = line.split()
if len(fields) != 2:
logging.warning("Expected 2 fields per line in script file")
continue
(timeString, action) = fields
(minString, secString) = timeString.split(':')
timeStamp = int(minString) + int(secString) / 60.0
self.timeTable.append((timeStamp, action))
pass
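    # Example script file (hypothetical cue sheet): each non-comment line is
    # "MM:SS Action", where MM:SS is the elapsed game time at which the cue fires
    # and Action is a key of the actions table in scriptThread(), e.g.:
    #   00:10 Station
    #   01:30 Train
    #   58:00 Darkness
    #   58:10 Explode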
def loop(self):
#try:
# self.restartAll()
#except Exception as e:
# logging.warning("Failed to initialize nodes (%s)" % str(e))
t1 = threading.Thread(target=self.timeTicker)
t2 = threading.Thread(target=self.timeSyncer)
t3 = threading.Thread(target=self.scriptThread)
t1.daemon = True
t2.daemon = True
t3.daemon = True
t1.start()
t2.start()
t3.start()
webapp.app.config['MASTER'] = self
webapp.app.logger.setLevel(logging.DEBUG)
log = logging.getLogger('werkzeug')
if log: log.setLevel(logging.DEBUG)
webapp.startServer()
#webapp.app.run(debug=False, host='0.0.0.0', port=8088)
while True:
time.sleep(5)
return
def readConfig(values):
#home = os.path.expanduser("~")
configPath = '/etc/roombreak.config'
try:
#file = open(os.path.join(configDir, 'roombreak.config'), 'r')
file = open(configPath, 'r')
except:
file = None
if not file:
return values
for line in file:
line = line.rstrip()
if not line or line[0] == '#': continue
(key, val) = line.split()
values[key] = val
file.close()
return values
def main(args):
readConfig(vars(args))
if args.debug:
level = logging.DEBUG
else:
level = logging.INFO
logstream = sys.stderr
try:
if args.logMain:
logstream = open(args.logMain, 'w')
except:
pass
logging.basicConfig(level = level, stream = logstream)
logging.debug(args)
ser = rs485.RS485(args.port, args.baudrate, timeout = 0.2, writeTimeout = 0.2)
bus = Bus(ser)
script = None
try:
if args.script:
script = open(args.script)
except Exception as e:
logging.warning("Unable to open script file (%s)" % str(e))
master = Master(bus, script)
master.loop()
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Metro@roombreak master scheduler')
parser.add_argument('-p', '--port', help = 'Serial port device')
parser.add_argument('-b', '--baudrate', help = 'Serial baudrate (default 19200)', type = int, default = 19200)
parser.add_argument('-d', '--debug', help = 'Debug', action = 'store_true', default = False)
parser.add_argument('-s', '--script', help = 'Script')
args = parser.parse_args(sys.argv[1:])
main(args)
|
preprocessing.py
|
import os
from collections import deque
from multiprocessing import Process
import cv2 as cv
import dlib
import numpy as np
from skimage import transform as tf
from tqdm import tqdm
STD_SIZE = (224, 224)
stablePntsIDs = [33, 36, 39, 42, 45]
def shape_to_array(shape):
coords = np.empty((68, 2))
for i in range(0, 68):
coords[i][0] = shape.part(i).x
coords[i][1] = shape.part(i).y
return coords
def cut_patch(img, landmarks, height, width, threshold=5):
center_x, center_y = np.mean(landmarks, axis=0)
if center_y - height < 0:
center_y = height
if center_y - height < 0 - threshold:
raise Exception('too much bias in height')
if center_x - width < 0:
center_x = width
if center_x - width < 0 - threshold:
raise Exception('too much bias in width')
if center_y + height > img.shape[0]:
center_y = img.shape[0] - height
if center_y + height > img.shape[0] + threshold:
raise Exception('too much bias in height')
if center_x + width > img.shape[1]:
center_x = img.shape[1] - width
if center_x + width > img.shape[1] + threshold:
raise Exception('too much bias in width')
cutted_img = np.copy(img[int(round(center_y) - round(height)): int(round(center_y) + round(height)),
int(round(center_x) - round(width)): int(round(center_x) + round(width))])
return cutted_img
def crop_patch(frames, landmarks, mean_face_landmarks):
"""Crop mouth patch
    :param list frames: video frames
:param list landmarks: interpolated landmarks
"""
for frame_idx, frame in enumerate(frames):
if frame_idx == 0:
q_frame, q_landmarks = deque(), deque()
sequence = []
q_landmarks.append(landmarks[frame_idx])
q_frame.append(frame)
if len(q_frame) == 12:
smoothed_landmarks = np.mean(q_landmarks, axis=0)
cur_landmarks = q_landmarks.popleft()
cur_frame = q_frame.popleft()
# -- affine transformation
trans = tf.estimate_transform('similarity', smoothed_landmarks[stablePntsIDs, :], mean_face_landmarks[stablePntsIDs, :])
trans_frame = tf.warp(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)
            trans_frame = trans_frame * 255  # note: output from warp is a double image (value range [0, 1])
trans_frame = trans_frame.astype('uint8')
trans_landmarks = trans(cur_landmarks)
# -- crop mouth patch
sequence.append(cut_patch(trans_frame, trans_landmarks[48:68], 60, 60))
if frame_idx == len(landmarks) - 1:
while q_frame:
cur_frame = q_frame.popleft()
# -- transform frame
trans_frame = tf.warp(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)
                trans_frame = trans_frame * 255  # note: output from warp is a double image (value range [0, 1])
trans_frame = trans_frame.astype('uint8')
# -- transform landmarks
trans_landmarks = trans(q_landmarks.popleft())
# -- crop mouth patch
sequence.append(cut_patch(trans_frame, trans_landmarks[48:68], 60, 60))
return np.array(sequence)
return None
def linear_interpolate(landmarks, start_idx, stop_idx):
start_landmarks = landmarks[start_idx]
stop_landmarks = landmarks[stop_idx]
delta = stop_landmarks - start_landmarks
for idx in range(1, stop_idx - start_idx):
landmarks[start_idx + idx] = start_landmarks + idx / float(stop_idx - start_idx) * delta
return landmarks
def landmarks_interpolate(landmarks):
"""Interpolate landmarks
    :param list landmarks: landmarks detected in raw videos
"""
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
if not valid_frames_idx:
return None
for idx in range(1, len(valid_frames_idx)):
if valid_frames_idx[idx] - valid_frames_idx[idx - 1] == 1:
continue
else:
landmarks = linear_interpolate(landmarks, valid_frames_idx[idx - 1], valid_frames_idx[idx])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
# -- Corner case: keep frames at the beginning or at the end failed to be detected.
if valid_frames_idx:
landmarks[:valid_frames_idx[0]] = [landmarks[valid_frames_idx[0]]] * valid_frames_idx[0]
landmarks[valid_frames_idx[-1]:] = [landmarks[valid_frames_idx[-1]]] * (len(landmarks) - valid_frames_idx[-1])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
assert len(valid_frames_idx) == len(landmarks), "not every frame has landmark"
return landmarks
def preprocess_sample(file, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
"""
Function to preprocess each data sample.
"""
videoFile = file + ".mp4"
audioFile = file + ".flac"
roiFile = file + ".png"
# Extract the audio from the video file using the FFmpeg utility and save it to a flac file.
if withaudio:
v2aCommand = "ffmpeg -y -v quiet -i " + videoFile + " -ac 1 -ar 16000 -vn " + audioFile
os.system(v2aCommand)
# for each frame, resize to 224x224 and crop the central 112x112 region
captureObj = cv.VideoCapture(videoFile)
frames = list()
landmarks = list()
while captureObj.isOpened():
ret, frame = captureObj.read()
if ret:
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if not len(frame) == 224:
frame = cv.resize(frame, (224, 224))
frames.append(frame)
face_rects = face_detector(frame, 0) # Detect face
if len(face_rects) < 1:
landmarks.append(None)
continue
            rect = face_rects[0]  # Use the first detected face
landmark = landmark_detector(frame, rect) # Detect face landmarks
landmark = shape_to_array(landmark)
landmarks.append(landmark)
else:
break
captureObj.release()
preprocessed_landmarks = landmarks_interpolate(landmarks)
if preprocessed_landmarks is None:
if defaultcrop == "lrs":
frames = [frame[52:172, 52:172] for frame in frames]
else:
frames = [frame[103: 223, 67: 187] for frame in frames]
else:
frames = crop_patch(frames, preprocessed_landmarks, mean_face_landmarks)
assert frames is not None, "cannot crop from {}.".format(videoFile)
cv.imwrite(roiFile, np.concatenate(frames, axis=1).astype(int))
def preprocess_sample_list(filesList, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
for file in tqdm(filesList, leave=True, desc="Preprocess", ncols=75):
preprocess_sample(file, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop)
def preprocessing(filesList, processes, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
# Preprocessing each sample
print("\nNumber of data samples to be processed = %d" % (len(filesList)))
print("\n\nStarting preprocessing ....\n")
face_detector = dlib.get_frontal_face_detector()
def splitlist(inlist, chunksize):
return [inlist[x:x + chunksize] for x in range(0, len(inlist), chunksize)]
    filesListSplitted = splitlist(filesList, max(1, len(filesList) // processes))
process_list = []
for subFilesList in filesListSplitted:
p = Process(target=preprocess_sample_list, args=(subFilesList, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop))
process_list.append(p)
        p.daemon = True
p.start()
for p in process_list:
p.join()
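# Minimal usage sketch (the model and template file names below are assumptions): load
# dlib's 68-point landmark predictor and a mean-face landmark template, then preprocess
# a list of sample prefixes (paths without the .mp4 extension).
#
#   import dlib
#   import numpy as np
#   landmark_detector = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#   mean_face_landmarks = np.load("mean_face_landmarks.npy")
#   preprocessing(["clips/sample0001"], processes=4, landmark_detector=landmark_detector,
#                 mean_face_landmarks=mean_face_landmarks, withaudio=True, defaultcrop="lrs")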
|
celery.py
|
import json
import multiprocessing
from typing import Any, Dict, List, Optional
from celery import Celery
from celery.result import AsyncResult
from redis import Redis
from typing_extensions import TypedDict
from openff.bespokefit.executor.services.models import Error
from openff.bespokefit.executor.utilities.typing import Status
class TaskInformation(TypedDict):
id: str
status: Status
result: Optional[Dict[str, Any]]
error: Optional[Dict[str, Any]]
def get_status(task_result: AsyncResult) -> Status:
return {
"PENDING": "waiting",
"STARTED": "running",
"RETRY": "running",
"FAILURE": "errored",
"SUCCESS": "success",
}[task_result.status]
def configure_celery_app(
    app_name: str, redis_connection: Redis, include: Optional[List[str]] = None
):
redis_host_name = redis_connection.connection_pool.connection_kwargs["host"]
redis_port = redis_connection.connection_pool.connection_kwargs["port"]
redis_db = redis_connection.connection_pool.connection_kwargs["db"]
celery_app = Celery(
app_name,
backend=f"redis://{redis_host_name}:{redis_port}/{redis_db}",
broker=f"redis://{redis_host_name}:{redis_port}/{redis_db}",
include=include,
)
celery_app.conf.task_track_started = True
celery_app.conf.task_default_queue = app_name
celery_app.conf.broker_transport_options = {"visibility_timeout": 1000000}
celery_app.conf.result_expires = None
return celery_app
def _spawn_worker(celery_app, concurrency: int = 1):
worker = celery_app.Worker(
concurrency=concurrency,
loglevel="INFO",
logfile=f"celery-{celery_app.main}.log",
quiet=True,
hostname=celery_app.main,
)
worker.start()
def spawn_worker(
celery_app, concurrency: int = 1, asynchronous: bool = True
) -> Optional[multiprocessing.Process]:
if concurrency < 1:
return
if asynchronous: # pragma: no cover
worker_process = multiprocessing.Process(
target=_spawn_worker, args=(celery_app, concurrency), daemon=True
)
worker_process.start()
return worker_process
else:
_spawn_worker(celery_app, concurrency)
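# Minimal usage sketch (assumes a local Redis instance on the default port): build a
# Celery app backed by that Redis instance and spawn one asynchronous worker process.
#
#   redis_connection = Redis(host="localhost", port=6379, db=0)
#   celery_app = configure_celery_app("bespokefit-example", redis_connection)
#   worker_process = spawn_worker(celery_app, concurrency=1, asynchronous=True)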
def get_task_information(app: Celery, task_id: str) -> TaskInformation:
task_result = AsyncResult(task_id, app=app)
task_output = (
None
if not isinstance(task_result.result, str)
else json.loads(task_result.result)
)
task_raw_error = (
None
if not isinstance(task_result.result, BaseException)
else task_result.result
)
task_error = (
None
if task_raw_error is None
else Error(
type=task_raw_error.__class__.__name__,
message=str(task_raw_error),
traceback=task_result.traceback,
)
)
task_status = get_status(task_result)
return TaskInformation(
id=task_id,
status=task_status,
result=task_output if task_status != "errored" else None,
error=None if not task_error else task_error.dict(),
)
|
jobs.py
|
# -*- coding: utf-8 -*-
"""
jobs
~~~~~~~~~~~~~~
Jobs defined here.
:copyright: (c) 2016 by fengweimin.
:date: 16/8/12
"""
import os
import threading
import time
from collections import Counter
import schedule
from app.models import Post
from app.views.public import okex_global_price_new
from app.views.public import bitfinex_global_price_new
post_view_times_counter = Counter()
def update_view_times(app):
"""
Update view times for posts.
"""
app.logger.info('Scheduler update_view_times running: %s' % post_view_times_counter)
d = dict(post_view_times_counter)
post_view_times_counter.clear()
    for k, v in d.items():
p = Post.find_one({'_id': k})
if p:
try:
p.viewTimes += v
p.save()
except:
                app.logger.exception('Failed when updating the viewTimes for post %s' % p._id)
def okex_update_prices(app):
"""
    Update global prices from OKEx.
    """
    app.logger.info('Scheduler okex_update_prices running')
try:
okex_global_price_new(app)
except:
app.logger.exception('Failed when updating the prices')
def bitfinex_update_prices(app):
"""
    Update global prices from Bitfinex.
    """
    app.logger.info('Scheduler bitfinex_update_prices running')
try:
bitfinex_global_price_new(app)
except:
app.logger.exception('Failed when updating the prices')
def run_schedule(app):
"""
Invoke schedule.
"""
# For schedule rules please refer to https://github.com/dbader/schedule
schedule.every(20).minutes.do(update_view_times, app)
schedule.every(1).minutes.do(okex_update_prices, app)
schedule.every(1).minutes.do(bitfinex_update_prices, app)
while True:
schedule.run_pending()
time.sleep(1)
def init_schedule(app):
"""
Init.
"""
# http://stackoverflow.com/questions/9449101/how-to-stop-flask-from-initialising-twice-in-debug-mode/
if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
t = threading.Thread(target=run_schedule, args=(app,))
# Python threads don't die when the main thread exits, unless they are daemon threads.
        t.daemon = True
t.start()
|
async_checkpoint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We do write graph and saver_def at the first call of before_run.
# We cannot do this in begin, since we let other hooks to change graph and
# add variables in begin. Graph is finalized after all begin calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
# Save the last checkpoint synchronously if needed.
if last_step != self._timer.last_triggered_step():
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
# Skip saving on step 0
if step == 0:
return
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
      for l in self._listeners:
        l.after_save(session, step)
if not asynchronous:
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
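# Minimal usage sketch (directory and interval are assumptions): attach the hook to a
# MonitoredTrainingSession so checkpoints are written from a background thread.
#
#   from tensorflow.python.training.monitored_session import MonitoredTrainingSession
#   hook = AsyncCheckpointSaverHook("/tmp/model_dir", save_secs=600)
#   with MonitoredTrainingSession(hooks=[hook]) as sess:
#     while not sess.should_stop():
#       sess.run(train_op)  # train_op defined elsewhere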
|
main_11_Predictors.py
|
import Redes.Red_LSTM_Fine as Interface
import Auxiliary.preprocessingData as Data
import Auxiliary.GPUtil as GPU
import numpy as np
import os
from threading import Thread
import pickle
import tensorflow as tf
###################################
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2, 3" # To force tensorflow to only see one GPU.
# TensorFlow wizardry
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate as-needed
config.gpu_options.allow_growth = True
# Only allow a total of half the GPU memory to be allocated
#config.gpu_options.per_process_gpu_memory_fraction = 0.3
###################################
def launch_cnn(index_data, index_gpu, graph, name_path, X_train, y_train, X_test, y_test):
with graph.as_default():
sess = tf.Session(config=config, graph=graph)
with tf.device('/gpu:'+str(index_gpu)):
with sess.as_default():
print("y_train_external: " + str(len(y_train[index_data])))
model = Interface.Red.build((1,128,1), 2, number_convolutional_layers=4, first_number_filters=256, dropout=0.5)
history, model = Interface.Red.train(model, index_data, name_path + '_all_'+str(index_data), X_train[index_data], y_train[index_data], X_test[index_data],
y_test[index_data], noise=1, l2_noise=1, weight_decay_noise=0.00001, stepped = False )
#, class_weight = {0 : 0.4, 1 : 0.6}
with open(name_path + str(index_data) + '.pkl', 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([history.history], f)
# Interface.Red.plot_info(history)
model.save_weights(name_path + str(index_data) + "_1.h5")
X_train, y_train, subjects_train, X_test, y_test, subjects_test = Data.loadData("data_5/predictores_normalizadoEstandarizado_subjects")
name_path = "../Results/result_05/lstm_fine_subjects/escalado/sin_weight_subject/"
try:
# Create target Directory
os.makedirs(name_path)
print("Directory " , name_path , " Created ")
except FileExistsError:
print("Directory " , name_path , " already exists")
name_file = "result"
number_folders = 10
number_gpus = 4
#number_folders = 1
#number_gpus = 1
temp_number_folder = 0
while temp_number_folder < number_folders:
threads = []
for i in range(number_gpus):
if temp_number_folder < number_folders:
graph = tf.Graph()
t = Thread(target=launch_cnn, args=(temp_number_folder, i ,graph, name_path + name_file, X_train, y_train, X_test, y_test))
temp_number_folder = temp_number_folder + 1
threads.append(t)
# Start all threads
for x in threads:
x.start()
# Wait for all of them to finish
for x in threads:
x.join()
quit()
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import threading
import sys
import time
from test import support
from test.support import TESTFN, run_unittest, unlink
from io import BytesIO
from io import StringIO
HOST = support.HOST
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
if hasattr(select, 'poll'):
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
        self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
        self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
        self.assertEqual(lines, expected)
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_send(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = support.bind_port(self.sock)
cap = BytesIO()
args = (self.evt, cap, self.sock)
threading.Thread(target=capture_server, args=args).start()
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, self.port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b'\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
self.evt.wait()
self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
if hasattr(asyncore, 'file_wrapper'):
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
open(TESTFN, 'wb').write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
self.assertEqual(open(TESTFN, 'rb').read(), self.d + d1 + d2)
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll]
if hasattr(asyncore, 'file_wrapper'):
tests.append(FileWrapperTest)
run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
process_prometheus_metrics.py
|
import logging
import threading
import time
import ipcqueue.posixmq
import prometheus_client.registry
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.core.management.base import BaseCommand
from ...backends.prometheus import PrometheusMultiprocessMonitoring
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Process Prometheus metrics"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
monitoring = apps.get_app_config(
"django_prometheus_monitoring"
).monitoring
if not isinstance(monitoring, PrometheusMultiprocessMonitoring):
raise ImproperlyConfigured(
"Monitoring backend is not instance of "
"PrometheusMultiprocessMonitoring"
)
self.monitoring = monitoring
self.metrics_lock = threading.Lock()
self.stop_event = threading.Event()
self.registry = prometheus_client.registry.CollectorRegistry(
auto_describe=True
)
def handle(self, *args, **options):
metrics_collector = threading.Thread(target=self.count_stats)
metrics_collector.start()
try:
self.consume_metrics()
finally:
self.stop_event.set()
def consume_metrics(self):
while 1:
try:
(
metric_cls,
name,
documentation,
labelnames,
method,
value,
labelvalues,
labelkwargs,
) = self.monitoring.queue.get(block=True)
if name not in self.monitoring.metrics:
metric = metric_cls(name, documentation, labelnames)
else:
metric = self.monitoring.metrics[name]
prometheus_metric = metric.get_prometheus_inst(self.registry)
self.metrics_lock.acquire()
try:
if labelvalues or labelkwargs:
prometheus_metric = prometheus_metric.labels(
*labelvalues, **labelkwargs
)
getattr(prometheus_metric, method)(value)
finally:
self.metrics_lock.release()
except ipcqueue.posixmq.QueueError as exc:
logger.error("Queue error: %d %s", exc.errno, exc.msg)
except Exception as exc:
logger.exception("Metrics consumer error: %s", exc)
def count_stats(self):
        while not self.stop_event.is_set():
try:
self.metrics_lock.acquire()
try:
stats = prometheus_client.generate_latest(self.registry)
finally:
self.metrics_lock.release()
self.monitoring.set_stats(stats)
wait_for_event(self.stop_event, 5.0)
except Exception as exc:
logger.exception("Metrics collector error: %s", exc)
def wait_for_event(event, seconds, step=0.1):
for unused in range(int(seconds / step)):
if event.is_set():
return
        time.sleep(step)
def run_metrics_consumer():
call_command(__name__.split(".")[-1])
|
UdpComms.py
|
# Created by Youssef Elashry to allow two-way communication between Python3 and Unity to send and receive strings
# Feel free to use this in your individual or commercial projects BUT make sure to reference me as: Two-way communication between Python 3 and Unity (C#) - Y. T. Elashry
# It would be appreciated if you send me how you have used this in your projects (e.g. Machine Learning) at youssef.elashry@gmail.com
# Use at your own risk
# Use under the Apache License 2.0
class UdpComms():
def __init__(self,udpIP,portTX,portRX,enableRX=False,suppressWarnings=True):
"""
Constructor
:param udpIP: Must be string e.g. "127.0.0.1"
:param portTX: integer number e.g. 8000. Port to transmit from i.e From Python to other application
:param portRX: integer number e.g. 8001. Port to receive on i.e. From other application to Python
:param enableRX: When False you may only send from Python and not receive. If set to True a thread is created to enable receiving of data
:param suppressWarnings: Stop printing warnings if not connected to other application
"""
import socket
self.udpIP = udpIP
self.udpSendPort = portTX
self.udpRcvPort = portRX
self.enableRX = enableRX
self.suppressWarnings = suppressWarnings # when true warnings are suppressed
self.isDataReceived = False
self.dataRX = None
# Connect via UDP
self.udpSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # internet protocol, udp (DGRAM) socket
self.udpSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # allows the address/port to be reused immediately instead of it being stuck in the TIME_WAIT state waiting for late packets to arrive.
self.udpSock.bind((udpIP, portRX))
# Create Receiving thread if required
if enableRX:
import threading
self.rxThread = threading.Thread(target=self.ReadUdpThreadFunc, daemon=True)
self.rxThread.start()
def __del__(self):
self.CloseSocket()
def CloseSocket(self):
# Function to close socket
self.udpSock.close()
def SendData(self, strToSend):
# Use this function to send string to C#
self.udpSock.sendto(bytes(strToSend,'utf-8'), (self.udpIP, self.udpSendPort))
def ReceiveData(self):
"""
Should not be called by user
Function BLOCKS until data is returned from C#. It then attempts to convert it to string and returns on successful conversion.
        A warning or error is raised if:
- Warning: Not connected to C# application yet. Warning can be suppressed by setting suppressWarning=True in constructor
- Error: If data receiving procedure or conversion to string goes wrong
- Error: If user attempts to use this without enabling RX
:return: returns None on failure or the received string on success
"""
if not self.enableRX: # if RX is not enabled, raise error
raise ValueError("Attempting to receive data without enabling this setting. Ensure this is enabled from the constructor")
data = None
try:
data, _ = self.udpSock.recvfrom(1024)
data = data.decode('utf-8')
except WindowsError as e:
if e.winerror == 10054: # An error occurs if you try to receive before connecting to other application
if not self.suppressWarnings:
print("Are You connected to the other application? Connect to it!")
else:
pass
else:
raise ValueError("Unexpected Error. Are you sure that the received data can be converted to a string")
return data
def ReadUdpThreadFunc(self): # Should be called from thread
"""
This function should be called from a thread [Done automatically via constructor]
(import threading -> e.g. udpReceiveThread = threading.Thread(target=self.ReadUdpNonBlocking, daemon=True))
This function keeps looping through the BLOCKING ReceiveData function and sets self.dataRX when data is received and sets received flag
This function runs in the background and updates class variables to read data later
"""
self.isDataReceived = False # Initially nothing received
while True:
data = self.ReceiveData() # Blocks (in thread) until data is returned (OR MAYBE UNTIL SOME TIMEOUT AS WELL)
self.dataRX = data # Populate AFTER new data is received
self.isDataReceived = True
# When it reaches here, data received is available
def ReadReceivedData(self):
"""
This is the function that should be used to read received data
Checks if data has been received SINCE LAST CALL, if so it returns the received string and sets flag to False (to avoid re-reading received data)
data is None if nothing has been received
:return:
"""
data = None
if self.isDataReceived: # if data has been received
self.isDataReceived = False
data = self.dataRX
self.dataRX = None # Empty receive buffer
return data
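# Minimal usage sketch (loopback address and ports taken from the constructor docstring):
# send a string to the peer application and poll for replies from the receive thread.
#
#   import time
#   sock = UdpComms(udpIP="127.0.0.1", portTX=8000, portRX=8001, enableRX=True)
#   sock.SendData("hello from Python")
#   while True:
#       data = sock.ReadReceivedData()  # returns None until something new arrives
#       if data is not None:
#           print(data)
#       time.sleep(1)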
|
__init__.py
|
# coding=utf-8
""" User Interface Tools """
import ee
import threading
import pprint
from . import dispatcher
ASYNC = False
def eprint(*args, **kwargs):
""" Print EE Objects. Similar to `print(object.getInfo())` but with
some magic (lol)
:param eeobject: object to print
:type eeobject: ee.ComputedObject
:param indent: indentation of the print output
:type indent: int
:param do_async: call getInfo() asynchronously
:type do_async: bool
"""
indent = kwargs.get('indent', 2)
do_async = kwargs.get('do_async', ASYNC)
pp = pprint.PrettyPrinter(indent=indent)
info_return = [None]*len(args)
def get_info(eeobject, index):
""" Get Info """
info_return[index] = dispatcher.dispatch(eeobject)
for i, eeobject in enumerate(args):
# DO THE SAME FOR EVERY OBJECT
if do_async:
thread = threading.Thread(target=get_info,
args=(eeobject, i))
thread.start()
else:
get_info(eeobject, i)
for result in info_return:
pp.pprint(result)
def getInfo(eeobject):
""" Get eeobject information (getInfo) asynchronously. For not async just
use `ee.data.getInfo` """
class newDict(dict):
def get(self):
return self['info']
def __call__(self):
return self.get()
result = newDict({'info':None})
def get_info(eeobject, from_ee):
if from_ee:
info = eeobject.getInfo()
else:
info = eeobject
result['info'] = info
module = getattr(eeobject, '__module__', None)
parent = module.split('.')[0] if module else None
if parent == ee.__name__:
thread = threading.Thread(target=get_info, args=(eeobject, True))
thread.start()
else:
get_info(eeobject, False)
return result
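# Minimal usage sketch (assumes the Earth Engine API has been initialised): eprint()
# pretty-prints the dispatched info of EE objects, while getInfo() fetches it in a
# background thread and returns a dict-like handle.
#
#   import ee
#   ee.Initialize()
#   img = ee.Image(1)
#   eprint(img)            # synchronous by default (ASYNC is False)
#   result = getInfo(img)  # returns immediately; result() yields the info once ready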
|
other-sites.py
|
'''
NERYS
a universal product monitor
Current Module: Other Sites
Usage:
NERYS will monitor specified sites for keywords and sends a Discord alert
when a page has a specified keyword. This can be used to monitor any site
on a product release date to automatically detect when a product has been
uploaded. Useful when monitoring hundreds of sites for shops in different
timezones.
Complete:
- find all products on Shopify site by keyword
- send discord notifications
- monitor for new products
- optimization for Shopify to return product checkout links by size
- find all products on other sites by keyword
- attempt to get product page links for universal sites
Left To Do:
- monitor for Shopify restocks
- monitor for restocks on other sites
-- find sold out by keyword
-- find sizes by keyword
-- find countdown timer by keyword
- detect cloudflare
- get product picture for other sites
- optimization for footsites
Credits:
Niveen Jegatheeswaran - Main Dev - https://github.com/snivyn/
kyb3r - Discord Embed - https://github.com/kyb3r/
'''
import requests
from bs4 import BeautifulSoup as soup
from log import log as log
import time
from datetime import datetime
import random
import sqlite3
from discord_hooks import Webhook
from threading import Thread
class Product():
def __init__(self, title, link, stock, keyword):
'''
(str, str, bool, str) -> None
Creates an instance of the Product class.
'''
# Setup product attributes
self.title = title
self.stock = stock
self.link = link
self.keyword = keyword
def read_from_txt(path):
'''
(str) -> list of str
Loads all sites from the txt file at <path> (e.g. other-sites.txt in the root directory).
Returns the sites as a list of strings.
'''
# Initialize variables
raw_lines = []
lines = []
# Load data from the txt file
try:
f = open(path, "r")
raw_lines = f.readlines()
f.close()
# Raise an error if the file couldn't be found
except:
log('e', "Couldn't locate <" + path + ">.")
raise FileNotFoundError("Couldn't locate <" + path + ">.")
if(len(raw_lines) == 0):
raise ValueError("No data loaded from <" + path + ">.")
# Parse the data
for line in raw_lines:
lines.append(line.strip("\n"))
# Return the data
return lines
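# Expected file format (assumption): one site URL per line, e.g. in other-sites.txt:
#   https://shop.example.com/collections/new-arrivals
#   https://another-shop.example.net/new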
def add_to_db(product):
'''
(Product) -> bool
Given a product <product>, the product is added to a database <products.db>
and whether or not a Discord alert should be sent out is returned. Discord
alerts are sent out based on whether or not a new product matching
keywords is found.
'''
# Initialize variables
title = product.title
stock = str(product.stock)
link = product.link
keyword = product.keyword
alert = False
# Create database
conn = sqlite3.connect('products.db')
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS products(title TEXT, link TEXT UNIQUE, stock TEXT, keywords TEXT)""")
# Add product to database if it's unique
try:
c.execute("""INSERT INTO products (title, link, stock, keywords) VALUES (?, ?, ?, ?)""", (title, link, stock, keyword))
log('s', "Found new product with keyword " + keyword + ". Link = " + link)
alert = True
except:
# Product already exists
pass
#log('i', "Product at URL <" + link + "> already exists in the database.")
# Close connection to the database
conn.commit()
c.close()
conn.close()
# Return whether or not it's a new product
return alert
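# Usage sketch (hedged; the link below is illustrative): add_to_db() returns True only the
# first time a link is stored, so it gates the Discord alert.
# p = Product("N/A", "https://shop.example.com/products/air-jordan-1", True, "jordan")
# if add_to_db(p):
#     send_embed(p)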
def send_embed(product):
'''
(Product) -> None
Sends a discord alert based on info provided.
'''
url = 'https://discord.com/api/webhooks/728820147346997278/ocPnHwKHaeCLeq1N1UJ7nAmO1qvat3sxr2G5xv72TubAGZWmhajDzknK9CfR6ZpvxA2i'
embed = Webhook(url, color=123123)
embed.set_author(name='NERYS', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg')
embed.set_desc("Found product based on keyword " + product.keyword)
embed.add_field(name="Link", value=product.link)
embed.set_footer(text='NERYS by @snivynGOD', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg', ts=True)
embed.post()
def monitor(link, keywords):
'''
(str, list of str) -> None
Given a URL <link> and keywords <keywords>, the URL is scanned and alerts
are sent via Discord when a new product containing a keyword is detected.
'''
log('i', "Checking site <" + link + ">...")
# Parse the site from the link
pos_https = link.find("https://")
pos_http = link.find("http://")
if(pos_https == 0):
site = link[8:]
end = site.find("/")
if(end != -1):
site = site[:end]
site = "https://" + site
else:
site = link[7:]
end = site.find("/")
if(end != -1):
site = site[:end]
site = "http://" + site
# Get all the links on the "New Arrivals" page
try:
r = requests.get(link, timeout=5, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed. Retrying...")
time.sleep(5)
try:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
return
page = soup(r.text, "html.parser")
raw_links = page.findAll("a")
hrefs = []
for raw_link in raw_links:
try:
hrefs.append(raw_link["href"])
except:
pass
# Check for links matching keywords
for href in hrefs:
found = False
for keyword in keywords:
if(keyword.upper() in href.upper()):
found = True
if("http" in href):
product_page = href
else:
product_page = site + href
product = Product("N/A", product_page, True, keyword)
alert = add_to_db(product)
if(alert):
send_embed(product)
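# Example of the site parsing above (illustrative URLs):
#   link = "https://shop.example.com/collections/new" -> site = "https://shop.example.com"
#   a relative href such as "/products/air-jordan-1" then becomes
#   "https://shop.example.com/products/air-jordan-1" before being stored and alerted on.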
if(__name__ == "__main__"):
# Ignore insecure messages
requests.packages.urllib3.disable_warnings()
# Keywords (separated by -)
keywords = [
"jordan",
"dunk",
"pharrell",
"free-throw-line",
"kendrick",
"tinker",
"game-royal",
"yeezy",
"human-race",
"big-bang",
"dont-trip",
"kung-fu-kenny",
"playstation",
"valentine",
"ovo-air-jordan",
"ovo-jordan",
"air-jordan-1",
"wotherspoon"
]
# Load sites from file
sites = read_from_txt("other-sites.txt")
# Start monitoring sites
while(True):
threads = []
for site in sites:
t = Thread(target=monitor, args=(site, keywords))
threads.append(t)
t.start()
time.sleep(2) # 2 second delay before going to the next site
|
app.py
|
from src.kafka_module.kf_service import process_block_merger_kf, block_merger_request_worker#, block_merger_request_worker_ocr
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_error
from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS
from src import routes
import config
import threading
import time
from src.utilities.app_context import LOG_WITHOUT_CONTEXT
import multiprocessing
merge_app = Flask(__name__)
def start_kafka():
try:
t1 = threading.Thread(target=process_block_merger_kf, name='BM-consumer-thread')
t1.start()
log_info("multithread Kafka running on multithread", LOG_WITHOUT_CONTEXT)
t2 = threading.Thread(target=block_merger_request_worker, name='BM-worker-thread')
t2.start()
log_info("Starting block_merger_request_worker", LOG_WITHOUT_CONTEXT)
# t3 = threading.Thread(target=block_merger_request_worker_ocr, name='BM-worker-ocr-thread')
# t3.start()
# log_info("Starting block_merger_request_worker_ocr", LOG_WITHOUT_CONTEXT)
# request_process = multiprocessing.Process(target=process_block_merger_kf)
# request_process.start()
# log_info("Starting block_merger_request_kf process", LOG_WITHOUT_CONTEXT)
#
# bm_process = []
# for p in range(config.BM_PROCESSES):
# bm_process.append(multiprocessing.Process(target=block_merger_request_worker))
# bm_process[-1].start()
# log_info("multiprocessing Kafka running on bm worker : {} ".format(p), LOG_WITHOUT_CONTEXT)
#
# bm__ocr_process = []
# for p in range(config.BM_OCR_PROCESSES):
# bm__ocr_process.append(multiprocessing.Process(target=block_merger_request_worker_ocr))
# bm__ocr_process[-1].start()
# log_info("multiprocessing Kafka running on ocr worker : {} ".format(p), LOG_WITHOUT_CONTEXT)
except Exception as e:
log_error("threading ERROR WHILE RUNNING CUSTOM THREADS ", LOG_WITHOUT_CONTEXT, e)
if config.ENABLE_CORS:
cors = CORS(merge_app, resources={r"/api/*": {"origins": "*"}})
for blueprint in vars(routes).values():
if isinstance(blueprint, Blueprint):
merge_app.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)
if __name__ == "__main__":
#multiprocessing.set_start_method('forkserver', force=True)
start_kafka()
print(merge_app.url_map)
merge_app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)
|
base_api.py
|
from django.http.response import HttpResponse
from django.views.decorators.http import require_http_methods
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from neomodel import UniqueProperty, DoesNotExist
from numpy import median
from copy import deepcopy
from rdflib import URIRef, Literal, Graph
import json
import random
import time
import csv
import multiprocessing
from objectmodels.Dataset import Dataset
from objectmodels.License import License
from objectmodels.Lattice import Lattice
from neomodel import clear_neo4j_database, db
from neomodels import NeoFactory, ObjectFactory
from neomodels.NeoModels import LicenseModel, DatasetModel, license_filter_labels, dataset_filter_search, license_filter_sets
from neomodels.NeoModels import get_leaf_licenses, get_root_licenses, get_compliant_licenses, get_compatible_licenses
from utils.TimerDecorator import fn_timer, LOGGER
from utils.authentificator import need_auth
from utils import D3jsData
from utils import Constraints
from utils import LicenseGenerator
from utils import CSVExporter
from utils import ODRL
from utils import RDFExporter
LEVELS_FILE = "license_levels.json"
URL_VALIDATOR = URLValidator()
@require_http_methods(['GET', 'POST', 'DELETE'])
def license_path(request, graph):
if request.method == 'GET':
return get_licenses(request, graph)
elif request.method == 'POST':
return add_license(request, graph)
elif request.method == 'DELETE':
return delete_license(request)
@require_http_methods(['GET', 'POST'])
def dataset_path(request, graph):
if request.method == 'GET':
return get_datasets(request, graph)
elif request.method == 'POST':
return add_dataset(request, graph)
def get_licenses(request, graph):
response_content = []
for neo_license in LicenseModel.nodes.filter(graph__exact=graph):
license_object = ObjectFactory.objectLicense(neo_license)
response_content.append(license_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
def get_datasets(request, graph):
response_content = []
for neo_dataset in DatasetModel.nodes.filter(graph__exact=graph):
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response_content.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@need_auth
def add_dataset(request, graph):
json_dataset = json.loads(request.body)
object_dataset = Dataset()
object_dataset.from_json(json_dataset)
neo_dataset = NeoFactory.NeoDataset(object_dataset, graph)
object_dataset = ObjectFactory.objectDataset(neo_dataset)
try:
neo_dataset.save()
response = HttpResponse(
json.dumps(object_dataset.to_json()),
content_type='application/json',
status=201,
)
except UniqueProperty:
response = HttpResponse(
json.dumps(object_dataset.to_json()),
content_type='application/json',
status=409,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_license_by_hash(request, hashed_sets, graph):
try:
neo_license = LicenseModel.nodes.filter(graph__exact=graph).get(hashed_sets=hashed_sets)
license_object = ObjectFactory.objectLicense(neo_license)
response = HttpResponse(
json.dumps(license_object.to_json()),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
except DoesNotExist:
response = HttpResponse(
"{}",
content_type='application/json',
status=404,
)
return response
def get_dataset_by_hash(request, hashed_uri, graph):
try:
neo_dataset = DatasetModel.nodes.filter(graph__exact=graph).get(hashed_uri=hashed_uri)
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response = HttpResponse(
json.dumps(dataset_object.to_json()),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"{}",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_license_search(request, graph):
query = request.GET.get('query', None)
label = request.GET.get('label', None)
permissions = request.GET.get('permissions', None)
if is_empty(permissions):
permissions = None
obligations = request.GET.get('obligations', None)
if is_empty(obligations):
obligations = None
prohibitions = request.GET.get('prohibitions', None)
if is_empty(prohibitions):
prohibitions = None
neo_licenses = LicenseModel.nodes.filter(graph__exact=graph)
if query:
neo_licenses = license_filter_labels(query)
else:
if label:
neo_licenses = license_filter_labels(label)
if permissions:
neo_licenses = license_filter_sets(permissions, 'permissions')
if obligations:
neo_licenses = license_filter_sets(obligations, 'obligations')
if prohibitions:
neo_licenses = license_filter_sets(prohibitions, 'prohibitions')
response_content = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
response_content.append(license_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_dataset_search(request, graph):
query = request.GET.get('query', None)
label = request.GET.get('label', None)
descr = request.GET.get('descr', None)
uri = request.GET.get('uri', None)
neo_datasets = DatasetModel.nodes.filter(graph__exact=graph)
if query:
neo_datasets = dataset_filter_search(query, graph)
else:
if label:
neo_datasets = neo_datasets.filter(label__icontains=label)
if uri:
neo_datasets = neo_datasets.filter(uri__icontains=uri)
if descr:
neo_datasets = neo_datasets.filter(description__icontains=descr)
response_content = []
for neo_dataset in neo_datasets:
dataset_object = ObjectFactory.objectDataset(neo_dataset)
response_content.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(response_content),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_datasets_of_licenses(request, hashed_sets, graph):
try:
neo_license = LicenseModel.nodes.filter(graph__exact=graph).get(hashed_sets=hashed_sets)
license_datasets = []
for dataset in neo_license.datasets.all():
dataset_object = ObjectFactory.objectDataset(dataset)
license_datasets.append(dataset_object.to_json())
response = HttpResponse(
json.dumps(license_datasets),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def is_empty(str_list):
if str_list is not None:
if str_list.replace(' ', '').replace('[', '').replace(']', '').split(',')[0] == '':
return True
return False
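# Examples of the helper above: is_empty(None) -> False, is_empty("[]") -> True,
# is_empty("[ ]") -> True, is_empty("[read, distribute]") -> False.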
@require_http_methods(['GET'])
@need_auth
@fn_timer
def add_license_experiment(request):
structure = request.GET.get('structure', 'linear_order')
order = request.GET.get('order', 'rand')
limit = int(request.GET.get('limit', '144'))
measure = request.GET.get('measure', 'time')
nb_exec = int(request.GET.get('executions', '1'))
aggregate = int(request.GET.get('aggregate', '1'))
measure_array_inf = {}
measure_array_supr = {}
measure_array_med = {}
# We do not check viability
# Add from the bottom
lattice = Lattice(ODRL.ACTIONS)
for i in range(0, nb_exec):
LOGGER.info("infimum insertion begin")
licenses = LicenseGenerator.generate(structure, order, limit)
measure_array_inf[i] = []
inf_times = []
inf_nb_visits = []
for j, license in enumerate(licenses):
object_license = deepcopy(license)
if j % 100 == 0:
LOGGER.info("infimum: {}/{} classified".format(j, len(licenses)))
t0 = time.time()
nb_visit = add_license_to_lattice(object_license, lattice, method='infimum')
t1 = time.time()
if measure == 'time':
measure_array_inf[i].append(t1-t0)
else:
measure_array_inf[i].append(nb_visit)
inf_times.append(t1-t0)
inf_nb_visits.append(nb_visit)
# clear_neo4j_database(db)
LOGGER.info("infimum insertion end")
lattice = Lattice(ODRL.ACTIONS)
LOGGER.info("supremum insertion begin")
# Add from the top
measure_array_supr[i] = []
supr_times = []
supr_nb_visits = []
for j, license in enumerate(licenses):
object_license = deepcopy(license)
if j % 100 == 0:
LOGGER.info("supremum: {}/{} classified".format(j, len(licenses)))
t0 = time.time()
nb_visit = add_license_to_lattice(object_license, lattice, method='supremum')
t1 = time.time()
if measure == 'time':
measure_array_supr[i].append(t1-t0)
else:
measure_array_supr[i].append(nb_visit)
supr_times.append(t1-t0)
supr_nb_visits.append(nb_visit)
LOGGER.info("supremum insertion end")
lattice = Lattice(ODRL.ACTIONS)
LOGGER.info("median insertion begin")
# from median
license_levels = []
level_median = 0
measure_array_med[i] = []
med_times = []
med_nb_visits = []
for j, license in enumerate(licenses):
object_license = deepcopy(license)
if j % 100 == 0:
LOGGER.info("median: {}/{} classified".format(j, len(licenses)))
license_level = object_license.get_level()
t0 = time.time()
if license_levels:
level_median = median(license_levels)
if license_level > level_median:
nb_visit = add_license_to_lattice(object_license, lattice, method='supremum', license_levels=license_levels)
else:
nb_visit = add_license_to_lattice(object_license, lattice, method='infimum', license_levels=license_levels)
t1 = time.time()
if measure == 'time':
measure_array_med[i].append(t1-t0)
else:
measure_array_med[i].append(nb_visit)
med_times.append(t1-t0)
med_nb_visits.append(nb_visit)
LOGGER.info("median insertion end")
lattice = Lattice(ODRL.ACTIONS)
CSVExporter.export(inf_times, inf_nb_visits, supr_times, supr_nb_visits, med_times, med_nb_visits, structure, order, limit, measure, nb_exec, aggregate)
response = HttpResponse(
content_type='application/json',
status=201,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
@need_auth
@fn_timer
def quadratic_experiment(request):
LOGGER.info("begin quadratic experiment")
nb_exec = int(request.GET.get('executions', '1'))
step = int(request.GET.get('step', '100'))
# We do not check viability
# Add from the bottom
licenses = LicenseGenerator.generate('lattice')
fieldnames = ['nb_nodes', 'nb_visits', 'time']
for ex in range(0, nb_exec):
with open('expermiental_results/quadratic_exec{}.csv'.format(ex), 'w+') as csvfile:
csv.DictWriter(csvfile, fieldnames=fieldnames).writeheader()
jobs = []
lattice = Lattice(ODRL.ACTIONS)
for nb_licenses in range(0, len(licenses), step):
for ex in range(0, nb_exec):
LOGGER.info("begin quadratic experiment [{} random licenses/exec {}]".format(nb_licenses, ex))
p = multiprocessing.Process(target=experiment_process, args=(nb_licenses, licenses, ex, fieldnames, deepcopy(lattice),))
jobs.append(p)
p.start()
response = HttpResponse(
content_type='application/json',
status=201,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def experiment_process(nb_licenses, licenses, ex, fieldnames, lattice):
random_licenses = random.sample(licenses, nb_licenses)
lattice = Lattice(ODRL.ACTIONS)
license_levels = []
level_median = 0
nb_visits = 0
t0 = time.time()
for object_license in random_licenses:
license_level = object_license.get_level()
if license_levels:
level_median = median(license_levels)
if license_level > level_median:
nb_visit = add_license_to_lattice(object_license, lattice, method='supremum', license_levels=license_levels)
else:
nb_visit = add_license_to_lattice(object_license, lattice, method='infimum', license_levels=license_levels)
nb_visits += nb_visit
t1 = time.time()
total_time = t1-t0
lattice = Lattice(ODRL.ACTIONS)
with open('expermiental_results/quadratic_exec{}.csv'.format(ex), 'a+') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'nb_nodes': nb_licenses, 'nb_visits': nb_visits, 'time': total_time})
@need_auth
def add_license(request, graph):
json_licenses = json.loads(request.body)
added_licenses = []
# random.shuffle(json_licenses)
license_levels = []
# level_median = 0
'''
try:
with open(LEVELS_FILE, 'r') as f:
license_levels = json.load(f)
except IOError:
pass
'''
for json_license in json_licenses:
object_license = License()
object_license.from_json(json_license)
if object_license.contains_only_odrl_actions():
if Constraints.is_license_viable(object_license):
object_license, nb_visit = add_license_to_db(object_license, method='infimum', license_levels=license_levels, graph=graph)
'''
if license_levels:
level_median = median(license_levels)
if object_license.get_level() > level_median:
object_license, nb_visit = add_license_to_db(object_license, method='supremum', license_levels=license_levels, graph=graph)
else:
object_license, nb_visit = add_license_to_db(object_license, method='infimum', license_levels=license_levels, graph=graph)
'''
added_licenses.append(object_license.to_json())
else:
added_licenses.append("Not a valid license: License is non-viable")
else:
added_licenses.append("Not a valid license: Use only ODRL actions")
'''
with open(LEVELS_FILE, 'w') as outfile:
json.dump(license_levels, outfile)
'''
response = HttpResponse(
json.dumps(added_licenses),
content_type='application/json',
status=201,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def add_license_to_db(object_license, method='infimum', license_levels=[], viability_check=True, nb_visit=0, graph='ld'):
neo_license = LicenseModel.nodes.filter(graph__exact=graph).get_or_none(hashed_sets=object_license.hash())
if neo_license:
# update of labels list if needed
neo_license.labels = list(set(object_license.get_labels()).union(neo_license.labels))
neo_license.save()
else:
# license does not exist in the db
if method == 'infimum':
neo_license, nb_visit = update_licenses_relations_infimum(object_license, viability_check, nb_visit, graph)
else:
neo_license, nb_visit = update_licenses_relations_supremum(object_license, viability_check, nb_visit, graph)
license_levels.append(object_license.get_level())
for dataset in object_license.get_datasets():
neo_dataset = DatasetModel.nodes.filter(graph__exact=graph).get_or_none(hashed_uri=dataset.hash())
if not neo_dataset:
neo_dataset = NeoFactory.NeoDataset(dataset, graph)
neo_dataset.save()
neo_license.datasets.connect(neo_dataset)
object_license = ObjectFactory.objectLicense(neo_license)
return object_license, nb_visit
def add_license_to_lattice(object_license, lattice, method='infimum', license_levels=[], nb_visit=0):
# We consider that object_license is not in the lattice
if method == 'infimum':
nb_visit = update_licenses_relations_infimum_lattice(object_license, lattice, nb_visit)
else:
nb_visit = update_licenses_relations_supremum_lattice(object_license, lattice, nb_visit)
license_levels.append(object_license.get_level())
return nb_visit
def update_licenses_relations_infimum(object_license, viability_check, nb_visit, graph='ld'):
tested_licenses = [object_license]
license_leaves = get_leaf_licenses(graph)
neo_license = NeoFactory.NeoLicense(object_license, graph)
neo_license.save()
for neo_license_leaf in license_leaves:
object_license_leaf = ObjectFactory.objectLicense(neo_license_leaf)
if object_license.is_preceding(object_license_leaf) and (Constraints.is_compatibility_viable(object_license, object_license_leaf) or not viability_check):
update_transitivity_follower(neo_license, object_license_leaf)
neo_license_leaf.precedings.connect(neo_license)
else:
nb_visit = update_licenses_relations_infimum_rec(neo_license, object_license, neo_license_leaf, object_license_leaf, viability_check, nb_visit, tested_licenses)
return neo_license, nb_visit
def update_licenses_relations_infimum_lattice(object_license, lattice, nb_visit, graph='ld'):
tested_licenses = [object_license]
license_leaves = [lattice.get_infimum()]
lattice.add_license(object_license)
for object_license_leaf in license_leaves:
if object_license.is_preceding(object_license_leaf):
object_license_leaf.precedings.append(object_license)
object_license.followings.append(object_license_leaf)
else:
nb_visit = update_licenses_relations_infimum_lattice_rec(object_license, object_license_leaf, lattice, nb_visit, tested_licenses)
return nb_visit
def update_licenses_relations_supremum(object_license, viability_check, nb_visit, graph='ld'):
tested_licenses = [object_license]
license_roots = get_root_licenses(graph)
neo_license = NeoFactory.NeoLicense(object_license, graph)
neo_license.save()
for neo_license_root in license_roots:
object_license_root = ObjectFactory.objectLicense(neo_license_root)
if object_license.is_following(object_license_root) and (Constraints.is_compatibility_viable(object_license_root, object_license) or not viability_check):
update_transitivity_preceder(neo_license, object_license_root)
neo_license_root.followings.connect(neo_license)
else:
nb_visit = update_licenses_relations_supremum_rec(neo_license, object_license, neo_license_root, object_license_root, viability_check, nb_visit, tested_licenses)
return neo_license, nb_visit
def update_licenses_relations_supremum_lattice(object_license, lattice, nb_visit):
tested_licenses = [object_license]
license_roots = [lattice.get_supremum()]
lattice.add_license(object_license)
for object_license_root in license_roots:
if object_license.is_following(object_license_root):
object_license_root.followings.append(object_license)
object_license.precedings.append(object_license_root)
else:
nb_visit = update_licenses_relations_supremum_lattice_rec(object_license, object_license_root, lattice, nb_visit, tested_licenses)
return nb_visit
def update_licenses_relations_infimum_rec(new_neo_license, new_object_license, neo_license, object_license, viability_check, nb_visit, tested_licenses):
# update precedings and followings of license recursively.
if object_license in tested_licenses:
return nb_visit
nb_visit += 1
tested_licenses.append(object_license)
grand_follower = False
for neo_license_following in neo_license.followings:
object_license_following = ObjectFactory.objectLicense(neo_license_following)
if already_follower(object_license_following, new_neo_license):
continue
if new_object_license.is_preceding(object_license_following) and (Constraints.is_compatibility_viable(new_object_license, object_license_following) or not viability_check):
update_transitivity_follower(new_neo_license, object_license_following)
new_neo_license.followings.connect(neo_license_following)
if new_object_license.is_following(object_license) and (Constraints.is_compatibility_viable(object_license, new_object_license) or not viability_check):
new_neo_license.precedings.connect(neo_license)
neo_license.followings.disconnect(neo_license_following)
else:
if new_object_license.is_following(object_license_following) and (Constraints.is_compatibility_viable(object_license_following, new_object_license) or not viability_check):
grand_follower = True
nb_visit = update_licenses_relations_infimum_rec(new_neo_license, new_object_license, neo_license_following, object_license_following, viability_check, nb_visit, tested_licenses)
if not grand_follower and (new_object_license.is_following(object_license) and (Constraints.is_compatibility_viable(object_license, new_object_license) or not viability_check)):
new_neo_license.precedings.connect(neo_license)
return nb_visit
def update_licenses_relations_infimum_lattice_rec(new_object_license, object_license, lattice, nb_visit, tested_licenses):
# update precedings and followings of license recursively.
if object_license in tested_licenses:
return nb_visit
nb_visit += 1
tested_licenses.append(object_license)
grand_follower = False
for object_license_following in object_license.get_followings():
if already_follower_lattice(object_license_following, new_object_license) or object_license_following == new_object_license:
continue
if new_object_license.is_preceding(object_license_following):
update_transitivity_follower_lattice(new_object_license, object_license_following)
new_object_license.followings.append(object_license_following)
object_license_following.precedings.append(new_object_license)
if new_object_license.is_following(object_license):
new_object_license.precedings.append(object_license)
object_license.followings.append(new_object_license)
object_license.followings.remove(object_license_following)
object_license_following.precedings.remove(object_license)
else:
if new_object_license.is_following(object_license_following):
grand_follower = True
nb_visit = update_licenses_relations_infimum_lattice_rec(new_object_license, object_license_following, lattice, nb_visit, tested_licenses)
if not grand_follower and new_object_license.is_following(object_license):
new_object_license.precedings.append(object_license)
object_license.followings.append(new_object_license)
return nb_visit
def update_licenses_relations_supremum_lattice_rec(new_object_license, object_license, lattice, nb_visit, tested_licenses):
# update precedings and followings of license recursively.
if object_license in tested_licenses:
return nb_visit
nb_visit += 1
tested_licenses.append(object_license)
grand_preceder = False
for object_license_preceding in object_license.get_precedings():
if already_preceder_lattice(object_license_preceding, new_object_license) or object_license_preceding == new_object_license:
continue
if new_object_license.is_following(object_license_preceding):
update_transitivity_preceder_lattice(new_object_license, object_license_preceding)
new_object_license.precedings.append(object_license_preceding)
object_license_preceding.followings.append(new_object_license)
if new_object_license.is_preceding(object_license):
new_object_license.followings.append(object_license)
object_license.precedings.append(new_object_license)
object_license.precedings.remove(object_license_preceding)
object_license_preceding.followings.remove(object_license)
else:
if new_object_license.is_preceding(object_license_preceding):
grand_preceder = True
nb_visit = update_licenses_relations_supremum_lattice_rec(new_object_license, object_license_preceding, lattice, nb_visit, tested_licenses)
if not grand_preceder and new_object_license.is_preceding(object_license):
new_object_license.followings.append(object_license)
object_license.precedings.append(new_object_license)
return nb_visit
def update_licenses_relations_supremum_rec(new_neo_license, new_object_license, neo_license, object_license, viability_check, nb_visit, tested_licenses):
# update precedings and followings of license recursively.
if object_license in tested_licenses:
return nb_visit
nb_visit += 1
tested_licenses.append(object_license)
grand_preceder = False
for neo_license_preceding in neo_license.precedings:
object_license_preceding = ObjectFactory.objectLicense(neo_license_preceding)
if already_preceder(object_license_preceding, new_neo_license):
continue
if new_object_license.is_following(object_license_preceding) and (Constraints.is_compatibility_viable(object_license_preceding, new_object_license) or not viability_check):
update_transitivity_preceder(new_neo_license, object_license_preceding)
new_neo_license.precedings.connect(neo_license_preceding)
if new_object_license.is_preceding(object_license) and (Constraints.is_compatibility_viable(new_object_license, object_license) or not viability_check):
new_neo_license.followings.connect(neo_license)
neo_license.precedings.disconnect(neo_license_preceding)
else:
if new_object_license.is_preceding(object_license_preceding) and (Constraints.is_compatibility_viable(new_object_license, object_license_preceding) or not viability_check):
grand_preceder = True
nb_visit = update_licenses_relations_supremum_rec(new_neo_license, new_object_license, neo_license_preceding, object_license_preceding, viability_check, nb_visit, tested_licenses)
if not grand_preceder and (new_object_license.is_preceding(object_license) and (Constraints.is_compatibility_viable(new_object_license, object_license) or not viability_check)):
new_neo_license.followings.connect(neo_license)
return nb_visit
def already_follower(object_license, new_neo_license):
for neo_follower in new_neo_license.followings:
object_follower = ObjectFactory.objectLicense(neo_follower)
if object_license != object_follower and object_license.is_following(object_follower):
return True
return False
def already_follower_lattice(object_license, new_object_license):
for object_follower in new_object_license.followings:
if object_license != object_follower and object_license.is_following(object_follower):
return True
return False
def already_preceder(object_license, new_neo_license):
for neo_preceder in new_neo_license.precedings:
object_preceder = ObjectFactory.objectLicense(neo_preceder)
if object_license != object_preceder and object_license.is_preceding(object_preceder):
return True
return False
def already_preceder_lattice(object_license, new_object_license):
for object_preceder in new_object_license.precedings:
if object_license != object_preceder and object_license.is_preceding(object_preceder):
return True
return False
def update_transitivity_follower(new_neo_license, new_object_follower):
for neo_follower in new_neo_license.followings:
object_follower = ObjectFactory.objectLicense(neo_follower)
if object_follower.is_following(new_object_follower):
new_neo_license.followings.disconnect(neo_follower)
def update_transitivity_follower_lattice(new_object_license, new_object_follower):
for object_follower in new_object_license.followings:
if object_follower.is_following(new_object_follower):
new_object_license.followings.remove(object_follower)
object_follower.precedings.remove(new_object_license)
def update_transitivity_preceder(new_neo_license, new_object_preceder):
for neo_preceder in new_neo_license.precedings:
object_preceder = ObjectFactory.objectLicense(neo_preceder)
if object_preceder.is_preceding(new_object_preceder):
new_neo_license.precedings.disconnect(neo_preceder)
def update_transitivity_preceder_lattice(new_object_license, new_object_preceder):
for object_preceder in new_object_license.precedings:
if object_preceder.is_preceding(new_object_preceder):
new_object_license.precedings.remove(object_preceder)
object_preceder.followings.remove(new_object_license)
@need_auth
def delete_license(request):
clear_neo4j_database(db)
try:
with open(LEVELS_FILE, 'w') as outfile:
json.dump([], outfile)
except IOError:
pass
response = HttpResponse(
'',
content_type='application/json',
status=200,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
@require_http_methods(['GET'])
def get_compliant(request, hashed_sets, graph):
try:
neo_licenses = get_compatible_licenses(hashed_sets, graph)
compatible_licenses = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
compatible_licenses.append(license_object.to_json())
response = HttpResponse(
json.dumps(compatible_licenses),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
@fn_timer
@require_http_methods(['GET'])
def get_compatible(request, hashed_sets, graph):
try:
neo_licenses = get_compliant_licenses(hashed_sets, graph)
compatible_licenses = []
for neo_license in neo_licenses:
license_object = ObjectFactory.objectLicense(neo_license)
compatible_licenses.append(license_object.to_json())
response = HttpResponse(
json.dumps(compatible_licenses),
content_type='application/json')
except DoesNotExist:
response = HttpResponse(
"[]",
content_type='application/json',
status=404,
)
response['Access-Control-Allow-Origin'] = '*'
return response
def export_licenses(request, graph, serialization_format):
licenses = []
if serialization_format not in ['n3', 'nt', 'xml', 'turtle', 'json-ld']:
serialization_format = 'turtle'
for neo_license in LicenseModel.nodes.filter(graph__exact=graph):
license_object = ObjectFactory.objectLicense(neo_license)
license_object = license_object.to_json()
license_object['compatible_licenses'] = []
for compatible_neo_license in neo_license.followings.all():
compatible_license = ObjectFactory.objectLicense(compatible_neo_license)
license_object['compatible_licenses'].append(compatible_license.hash())
licenses.append(license_object)
rdf_licenses = RDFExporter.get_rdf(licenses, graph)
RDFExporter.add_meta_license(rdf_licenses, graph, request.build_absolute_uri())
response = HttpResponse(
rdf_licenses.serialize(format=serialization_format),
content_type='text/{}'.format(serialization_format))
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_graph(request, graph):
nodes = []
links = []
for neo_license in LicenseModel.nodes.filter(graph__exact=graph):
license_object = ObjectFactory.objectLicense(neo_license)
nodes.append(D3jsData.license_node(license_object))
license_level = license_object.get_level()
for neo_dataset in neo_license.datasets.all():
dataset_object = ObjectFactory.objectDataset(neo_dataset)
nodes.append(D3jsData.dataset_node(dataset_object, license_level))
links.append(D3jsData.dataset_link(license_object, dataset_object))
for compatible_neo_license in neo_license.followings.all():
compatible_license_object = ObjectFactory.objectLicense(compatible_neo_license)
links.append(D3jsData.compatible_link(license_object, compatible_license_object))
response = HttpResponse(
json.dumps(D3jsData.graph(nodes, links)),
content_type='application/json')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET'])
def get_cali_ontology(request):
mapping = Graph().parse('./cali_webservice/templates/cali_ontology.ttl', format='ttl')
response = HttpResponse(
mapping.serialize(format='turtle'),
content_type='text/turtle; charset=utf-8')
response['Access-Control-Allow-Origin'] = '*'
return response
@require_http_methods(['GET', 'HEAD', 'OPTIONS'])
def tpf_endpoint(request, graph):
page = int(request.GET.get('page', '1'))
subject = request.GET.get('subject')
subject = URIRef(subject) if subject else None
predicate = request.GET.get('predicate')
predicate = URIRef(predicate) if predicate else None
obj = request.GET.get('object')
obj = URIRef(obj) if obj else None
if obj is not None:
try:
URL_VALIDATOR(obj)
obj = URIRef(obj)
except ValidationError:
obj = _string_to_literal(obj)
fragment = RDFExporter.get_fragment(request, subject, predicate, obj, page, graph)
response = HttpResponse(
fragment.serialize(format="trig", encoding="utf-8"),
content_type='application/trig; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="twitter_tpf_fragment.trig"'
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Headers'] = 'Accept-Datetime,Accept'
return response
def _string_to_literal(string):
split_literal = string.split('"')
value = split_literal[1]
datatype = split_literal[2].split('^^')[1] if '^^' in split_literal[2] else None
try:
URL_VALIDATOR(datatype)
datatype = URIRef(datatype)
except ValidationError:
datatype = None
return Literal(value, datatype=datatype)
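# Example (illustrative): _string_to_literal('"42"^^http://www.w3.org/2001/XMLSchema#integer')
# yields Literal('42', datatype=URIRef('http://www.w3.org/2001/XMLSchema#integer')),
# while a plain '"hello"' yields Literal('hello', datatype=None).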
|
core.py
|
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2021> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import io
import time
import codecs
import contextlib
import functools
import hashlib
import inspect
import logging
import itertools
import json
import types
import re
import socket
import tempfile
import threading
import traceback
import warnings
from functools import partial
from typing import Callable
from .compat import (
BaseClass,
BaseHTTPRequestHandler,
quote,
quote_plus,
urlencode,
encode_obj,
urlunsplit,
urlsplit,
parse_qs,
unquote_utf8,
)
from .http import (
STATUSES,
HttpBaseClass,
parse_requestline,
last_requestline,
)
from .utils import (
utf8,
decode_utf8,
)
from .errors import HTTPrettyError, UnmockedError
from datetime import datetime
from datetime import timedelta
from errno import EAGAIN
old_socket = socket.socket
old_socketpair = getattr(socket, 'socketpair', None)
old_SocketType = socket.SocketType
old_create_connection = socket.create_connection
old_gethostbyname = socket.gethostbyname
old_gethostname = socket.gethostname
old_getaddrinfo = socket.getaddrinfo
old_socksocket = None
old_ssl_wrap_socket = None
old_sslwrap_simple = None
old_sslsocket = None
old_sslcontext_wrap_socket = None
old_sslcontext = None
MULTILINE_ANY_REGEX = re.compile(r'.*', re.M)
hostname_re = re.compile(r'\^?(?:https?://)?[^:/]*[:/]?')
logger = logging.getLogger(__name__)
try: # pragma: no cover
import socks
old_socksocket = socks.socksocket
except ImportError:
socks = None
try: # pragma: no cover
import ssl
old_sslcontext_class = ssl.SSLContext
old_sslcontext = ssl.create_default_context()
old_ssl_wrap_socket = old_sslcontext.wrap_socket
try:
old_sslcontext_wrap_socket = ssl.SSLContext.wrap_socket
except AttributeError:
pass
old_sslsocket = ssl.SSLSocket
except ImportError: # pragma: no cover
ssl = None
try:
import _ssl
except ImportError:
_ssl = None
# used to handle error caused by ndg-httpsclient
pyopenssl_overrides_inject = []
pyopenssl_overrides_extract = []
try:
from requests.packages.urllib3.contrib.pyopenssl import inject_into_urllib3, extract_from_urllib3
pyopenssl_overrides_extract.append(extract_from_urllib3)
pyopenssl_overrides_inject.append(inject_into_urllib3)
except Exception:
pass
try:
from urllib3.contrib.pyopenssl import extract_from_urllib3, inject_into_urllib3
pyopenssl_overrides_extract.append(extract_from_urllib3)
pyopenssl_overrides_inject.append(inject_into_urllib3)
except Exception:
pass
try:
import requests.packages.urllib3.connection as requests_urllib3_connection
old_requests_ssl_wrap_socket = requests_urllib3_connection.ssl_wrap_socket
except ImportError:
requests_urllib3_connection = None
old_requests_ssl_wrap_socket = None
try:
import eventlet
import eventlet.green
except ImportError:
eventlet = None
DEFAULT_HTTP_PORTS = frozenset([80])
POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS)
DEFAULT_HTTPS_PORTS = frozenset([443])
POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS)
def FALLBACK_FUNCTION(x):
return x
class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
r"""Represents a HTTP request. It takes a valid multi-line,
``\r\n`` separated string with HTTP headers and parse them out using
the internal `parse_request` method.
It also replaces the `rfile` and `wfile` attributes with :py:class:`io.BytesIO`
instances so that we guarantee that it won't make any I/O, neither
for writing nor reading.
It has some convenience attributes:
``headers`` -> a mimetype object that can be cast into a dictionary,
contains all the request headers
``protocol`` -> the protocol of this host, inferred from the port
of the underlying fake TCP socket.
``host`` -> the hostname of this request.
``url`` -> the full url of this request.
``path`` -> the path of the request.
``method`` -> the HTTP method used in this request.
``querystring`` -> a dictionary containing lists with the
attributes. Please notice that if you need a single value from a
query string you will need to get it manually like:
``body`` -> the request body as a string.
``parsed_body`` -> the request body parsed by ``parse_request_body``.
.. testcode::
>>> request.querystring
{'name': ['Gabriel Falcao']}
>>> print(request.querystring['name'][0])
Gabriel Falcao
"""
def __init__(self, headers, body='', sock=None, path_encoding = 'iso-8859-1'):
# first of all, lets make sure that if headers or body are
# unicode strings, it must be converted into a utf-8 encoded
# byte string
self.created_at = time.time()
self.raw_headers = utf8(headers.strip())
self._body = utf8(body)
self.connection = sock
# Now let's concatenate the headers with the body, and create
# `rfile` based on it
self.rfile = io.BytesIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
# Creating `wfile` as an empty BytesIO, just to avoid any
# real I/O calls
self.wfile = io.BytesIO()
# parsing the request line preemptively
self.raw_requestline = self.rfile.readline()
# initiating the error attributes with None
self.error_code = None
self.error_message = None
# Parse the request based on the attributes above
if not self.parse_request():
return
# Now a few convenient attributes for the HTTPretty API:
# - `path`
# - `querystring` holds a dictionary with the parsed query string
# - `parsed_body` a string
try:
self.path = self.path.encode(path_encoding)
except UnicodeDecodeError:
pass
self.path = decode_utf8(self.path)
qstring = self.path.split("?", 1)[-1]
self.querystring = self.parse_querystring(qstring)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
"""a dictionary containing parsed request body or None if
HTTPrettyRequest doesn't know how to parse it. It currently
supports parsing body data that was sent under the
``content-type`` header values: ``application/json`` or
``application/x-www-form-urlencoded``
"""
self.parsed_body = self.parse_request_body(self._body)
@property
def method(self):
"""the HTTP method used in this request"""
return self.command
@property
def protocol(self):
"""the protocol used in this request"""
proto = ''
if not self.connection:
return ''
elif self.connection.is_http:
proto = 'http'
if self.connection.is_secure:
proto = 'https'
return proto
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = utf8(value)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
self.parsed_body = self.parse_request_body(self._body)
def __nonzero__(self):
return bool(self.body) or bool(self.raw_headers)
@property
def url(self):
"""the full url of this recorded request"""
return "{}://{}{}".format(self.protocol, self.host, self.path)
@property
def host(self):
return self.headers.get('Host') or '<unknown>'
def __str__(self):
tmpl = '<HTTPrettyRequest("{}", "{}", headers={}, body={})>'
return tmpl.format(
self.method,
self.url,
dict(self.headers),
len(self.body),
)
def parse_querystring(self, qs):
"""parses an UTF-8 encoded query string into a dict of string lists
:param qs: a querystring
:returns: a dict of lists
"""
expanded = unquote_utf8(qs)
parsed = parse_qs(expanded)
result = {}
for k in parsed:
result[k] = list(map(decode_utf8, parsed[k]))
return result
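# Example: parse_querystring('name=Gabriel%20Falcao&tag=a&tag=b') returns
# {'name': ['Gabriel Falcao'], 'tag': ['a', 'b']} -- every value is a list of strings.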
def parse_request_body(self, body):
"""Attempt to parse the post based on the content-type passed.
Return the regular body if not
:param body: string
:returns: a python object such as a dict or list if the deserialization succeeded, else the given param ``body``
"""
PARSING_FUNCTIONS = {
'application/json': json.loads,
'text/json': json.loads,
'application/x-www-form-urlencoded': self.parse_querystring,
}
content_type = self.headers.get('content-type', '')
do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
try:
body = decode_utf8(body)
return do_parse(body)
except Exception:
return body
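# Example: with a 'content-type: application/json' header, a body of b'{"ok": true}' is
# returned as {'ok': True}; any content type without a parser above falls back to
# FALLBACK_FUNCTION and the decoded string is returned unchanged.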
class EmptyRequestHeaders(dict):
"""A dict subclass used as internal representation of empty request
headers
"""
class HTTPrettyRequestEmpty(object):
"""Represents an empty :py:class:`~httpretty.core.HTTPrettyRequest`
where all its properties are somehow empty or ``None``
"""
method = None
url = None
body = ''
headers = EmptyRequestHeaders()
class FakeSockFile(object):
"""Fake socket file descriptor. Under the hood all data is written in
a temporary file, giving it a real file descriptor number.
"""
def __init__(self):
self.file = tempfile.TemporaryFile()
self._fileno = self.file.fileno()
def getvalue(self):
if hasattr(self.file, 'getvalue'):
return self.file.getvalue()
else:
return self.file.read()
def close(self):
self.socket.close()
def fileno(self):
return self._fileno
def __getattr__(self, name):
return getattr(self.file, name)
def __del__(self):
try:
self.close()
except (ValueError, AttributeError):
pass
class FakeSSLSocket(object):
"""Shorthand for :py:class:`~httpretty.core.fakesock`
"""
def __init__(self, sock, *args, **kw):
self._httpretty_sock = sock
def __getattr__(self, attr):
return getattr(self._httpretty_sock, attr)
class FakeAddressTuple(object):
def __init__(self, fakesocket):
self.fakesocket = fakesocket
def __getitem__(self, *args, **kw):
raise AssertionError('socket {} is not connected'.format(self.fakesocket.truesock))
def fake_socketpair(*args, **kw):
with restored_libs():
return old_socketpair(*args, **kw)
class fakesock(object):
"""
fake :py:mod:`socket`
"""
class socket(object):
"""drop-in replacement for :py:class:`socket.socket`
"""
_entry = None
_read_buf = None
debuglevel = 0
_sent_data = []
is_secure = False
def __init__(
self,
family=socket.AF_INET,
type=socket.SOCK_STREAM,
proto=0,
fileno=None
):
self.socket_family = family
self.socket_type = type
self.socket_proto = proto
if httpretty.allow_net_connect:
self.truesock = self.create_socket()
else:
self.truesock = None
self._address = FakeAddressTuple(self)
self.__truesock_is_connected__ = False
self.fd = FakeSockFile()
self.fd.socket = fileno or self
self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
self._sock = fileno or self
self.is_http = False
self._bufsize = 32 * 1024
def __repr__(self):
return '{self.__class__.__module__}.{self.__class__.__name__}("{self.host}")'.format(**locals())
@property
def host(self):
return ":".join(map(str, self._address))
def create_socket(self, address=None):
return old_socket(self.socket_family, self.socket_type, self.socket_proto)
def getpeercert(self, *a, **kw):
now = datetime.now()
shift = now + timedelta(days=30 * 12)
return {
'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
'subjectAltName': (
('DNS', '*.%s' % self._host),
('DNS', self._host),
('DNS', '*'),
),
'subject': (
(
('organizationName', '*.%s' % self._host),
),
(
('organizationalUnitName',
'Domain Control Validated'),
),
(
('commonName', '*.%s' % self._host),
),
),
}
def ssl(self, sock, *args, **kw):
return sock
def setsockopt(self, level, optname, value):
if httpretty.allow_net_connect and not self.truesock:
self.truesock = self.create_socket()
elif not self.truesock:
logger.debug('setsockopt(%s, %s, %s) failed', level, optname, value)
return
return self.truesock.setsockopt(level, optname, value)
def connect(self, address):
try:
self._address = (self._host, self._port) = address
except ValueError:
# We get here when the address is just a string pointing to a
# unix socket path/file
#
# See issue #206
self.is_http = False
else:
ports_to_check = (
POTENTIAL_HTTP_PORTS.union(POTENTIAL_HTTPS_PORTS))
self.is_http = self._port in ports_to_check
self.is_secure = self._port in POTENTIAL_HTTPS_PORTS
if not self.is_http:
self.connect_truesock(address=address)
elif self.truesock and not self.real_socket_is_connected():
# TODO: remove nested if
matcher = httpretty.match_http_address(self._host, self._port)
if matcher is None:
self.connect_truesock(address=address)
def bind(self, address):
self._address = (self._host, self._port) = address
if self.truesock:
self.bind_truesock(address)
def bind_truesock(self, address):
if httpretty.allow_net_connect and not self.truesock:
self.truesock = self.create_socket()
elif not self.truesock:
raise UnmockedError('Failed to socket.bind() because a real socket was never created.', address=address)
return self.truesock.bind(address)
def connect_truesock(self, request=None, address=None):
address = address or self._address
if self.__truesock_is_connected__:
return self.truesock
if request:
logger.warning('real call to socket.connect() for {request}'.format(**locals()))
elif address:
logger.warning('real call to socket.connect() for {address}'.format(**locals()))
else:
logger.warning('real call to socket.connect()')
if httpretty.allow_net_connect and not self.truesock:
self.truesock = self.create_socket(address)
elif not self.truesock:
raise UnmockedError('Failed to socket.connect() because a real socket was never created.', request=request, address=address)
undo_patch_socket()
try:
hostname = self._address[0]
port = 80
if len(self._address) == 2:
port = self._address[1]
if port == 443 and old_sslsocket:
self.truesock = old_ssl_wrap_socket(self.truesock, server_hostname=hostname)
sock = self.truesock
sock.connect(self._address)
self.__truesock_is_connected__ = True
self.truesock = sock
finally:
apply_patch_socket()
return self.truesock
def real_socket_is_connected(self):
return self.__truesock_is_connected__
def fileno(self):
if self.truesock:
return self.truesock.fileno()
return self.fd.fileno()
def close(self):
if self.truesock:
self.truesock.close()
self.truesock = None
self.__truesock_is_connected__ = False
def makefile(self, mode='r', bufsize=-1):
"""Returns this fake socket's own tempfile buffer.
If there is an entry associated with the socket, the file
descriptor gets filled in with the entry data before being
returned.
"""
self._mode = mode
self._bufsize = bufsize
if self._entry:
t = threading.Thread(
target=self._entry.fill_filekind, args=(self.fd,)
)
t.start()
if self.timeout == socket._GLOBAL_DEFAULT_TIMEOUT:
timeout = None
else:
timeout = self.timeout
t.join(timeout)
if t.is_alive():
raise socket.timeout
return self.fd
def real_sendall(self, data, *args, **kw):
"""Sends data to the remote server. This method is called
when HTTPretty identifies that someone is trying to send
non-http data.
The received bytes are written in this socket's tempfile
buffer so that HTTPretty can return it accordingly when
necessary.
"""
request = kw.pop('request', None)
if request:
bytecount = len(data)
logger.warning('{self}.real_sendall({bytecount} bytes) to {request.url} via {request.method} at {request.created_at}'.format(**locals()))
if httpretty.allow_net_connect and not self.truesock:
self.connect_truesock(request=request)
elif not self.truesock:
raise UnmockedError(request=request)
if not self.is_http:
self.truesock.setblocking(1)
return self.truesock.sendall(data, *args, **kw)
sock = self.connect_truesock(request=request)
sock.setblocking(1)
sock.sendall(data, *args, **kw)
should_continue = True
while should_continue:
try:
received = sock.recv(self._bufsize)
self.fd.write(received)
should_continue = bool(received.strip())
except socket.error as e:
if e.errno == EAGAIN:
continue
break
self.fd.seek(0)
def sendall(self, data, *args, **kw):
# if self.__truesock_is_connected__:
# return self.truesock.sendall(data, *args, **kw)
self._sent_data.append(data)
self.fd = FakeSockFile()
self.fd.socket = self
if isinstance(data, str):
data = data.encode('utf-8')
elif not isinstance(data, bytes):
logger.debug('cannot sendall({!r})'.format(data))
data = bytes(data)
try:
requestline, _ = data.split(b'\r\n', 1)
method, path, version = parse_requestline(
decode_utf8(requestline))
is_parsing_headers = True
except ValueError:
path = ''
is_parsing_headers = False
if self._entry is None:
# If the previous request wasn't mocked, don't
# mock the subsequent sending of data
return self.real_sendall(data, *args, **kw)
else:
method = self._entry.method
path = self._entry.info.path
self.fd.seek(0)
if not is_parsing_headers:
if len(self._sent_data) > 1:
headers = utf8(last_requestline(self._sent_data))
meta = self._entry.request.headers
body = utf8(self._sent_data[-1])
if meta.get('transfer-encoding', '') == 'chunked':
if not body.isdigit() and (body != b'\r\n') and (body != b'0\r\n\r\n'):
self._entry.request.body += body
else:
self._entry.request.body += body
httpretty.historify_request(headers, body, sock=self)
return
if path[:2] == '//':
path = '//' + path
# path might come with
s = urlsplit(path)
POTENTIAL_HTTP_PORTS.add(int(s.port or 80))
parts = list(map(utf8, data.split(b'\r\n\r\n', 1)))
if len(parts) == 2:
headers, body = parts
else:
headers = ''
body = data
request = httpretty.historify_request(headers, body, sock=self)
info = URIInfo(
hostname=self._host,
port=self._port,
path=s.path,
query=s.query,
last_request=request
)
matcher, entries = httpretty.match_uriinfo(info)
if not entries:
logger.debug('no entries matching {}'.format(request))
self._entry = None
self._read_buf = None
self.real_sendall(data, request=request)
return
self._entry = matcher.get_next_entry(method, info, request)
def forward_and_trace(self, function_name, *a, **kw):
if not self.truesock:
raise UnmockedError('Failed to socket.{}() because a real socket was never created.'.format(function_name))
callback = getattr(self.truesock, function_name)
return callback(*a, **kw)
def settimeout(self, new_timeout):
self.timeout = new_timeout
if not self.is_http:
if self.truesock:
self.truesock.settimeout(new_timeout)
def send(self, data, *args, **kwargs):
self.sendall(data, *args, **kwargs)
return len(data)
def sendto(self, *args, **kwargs):
return self.forward_and_trace('sendto', *args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
return self.forward_and_trace('recvfrom_into', *args, **kwargs)
def recv_into(self, *args, **kwargs):
return self.forward_and_trace('recv_into', *args, **kwargs)
def recvfrom(self, *args, **kwargs):
return self.forward_and_trace('recvfrom', *args, **kwargs)
def recv(self, buffersize=0, *args, **kwargs):
if not self._read_buf:
self._read_buf = io.BytesIO()
if self._entry:
self._entry.fill_filekind(self._read_buf)
if not self._read_buf:
raise UnmockedError('socket cannot recv(): {!r}'.format(self))
return self._read_buf.read(buffersize)
def __getattr__(self, name):
if name in ('getsockopt', 'selected_alpn_protocol') and not self.truesock:
self.truesock = self.create_socket()
elif httpretty.allow_net_connect and not self.truesock:
# can't call self.connect_truesock() here because we
                # don't know if the user wants to execute server or client
# calls (or can they?)
self.truesock = self.create_socket()
elif not self.truesock:
# Special case for
# `hasattr(sock, "version")` call added in urllib3>=1.26.
if name == 'version':
raise AttributeError(
"HTTPretty synthesized this error to fix urllib3 compatibility "
"(see issue https://github.com/gabrielfalcao/HTTPretty/issues/409). "
"Please open an issue if this error causes further unexpected issues."
)
                raise UnmockedError('Failed to socket.{} because a real socket does not exist'.format(name))
return getattr(self.truesock, name)
def with_socket_is_secure(sock, kw):
sock.is_secure = True
sock.kwargs = kw
for k, v in kw.items():
setattr(sock, k, v)
return sock
def fake_wrap_socket(orig_wrap_socket_fn, *args, **kw):
"""drop-in replacement for py:func:`ssl.wrap_socket`
"""
if 'sock' in kw:
sock = kw['sock']
else:
sock = args[0]
server_hostname = kw.get('server_hostname')
if server_hostname is not None:
matcher = httpretty.match_https_hostname(server_hostname)
if matcher is None:
logger.debug('no requests registered for hostname: "{}"'.format(server_hostname))
return with_socket_is_secure(sock, kw)
return with_socket_is_secure(sock, kw)
def create_fake_connection(
address,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""drop-in replacement for :py:func:`socket.create_connection`"""
s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
s.settimeout(timeout)
if isinstance(source_address, tuple) and len(source_address) == 2:
        source_address = (source_address[0], int(source_address[1]))
if source_address:
s.bind(source_address)
s.connect(address)
return s
def fake_gethostbyname(host):
"""drop-in replacement for :py:func:`socket.gethostbyname`"""
return '127.0.0.1'
def fake_gethostname():
"""drop-in replacement for :py:func:`socket.gethostname`"""
return 'localhost'
def fake_getaddrinfo(
host, port, family=None, socktype=None, proto=None, flags=None):
"""drop-in replacement for :py:func:`socket.getaddrinfo`"""
return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP,
'', (host, port)),
(socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP,
'', (host, port))]
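# Illustrative sketch (added for clarity, not part of the original module):
# once the fake resolver functions above are patched over the socket module,
# every hostname resolves to loopback and getaddrinfo returns one IPv4 and
# one IPv6 entry for the requested host/port pair.
def _example_fake_name_resolution():
    assert fake_gethostbyname('example.com') == '127.0.0.1'
    assert fake_gethostname() == 'localhost'
    families = [entry[0] for entry in fake_getaddrinfo('example.com', 443)]
    return families  # expected: [socket.AF_INET, socket.AF_INET6]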
class Entry(BaseClass):
"""Created by :py:meth:`~httpretty.core.httpretty.register_uri` and
stored in memory as internal representation of a HTTP
request/response definition.
Args:
method (str): One of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``.
uri (str|re.Pattern): The URL to match
adding_headers (dict): Extra headers to be added to the response
forcing_headers (dict): Overwrite response headers.
status (int): The status code for the response, defaults to ``200``.
        streaming (bool): Whether to stream the response in chunks via a generator.
headers: Headers to inject in the faked response.
Returns:
httpretty.Entry: containing the request-matching metadata.
.. warning:: When using the ``forcing_headers`` option make sure to add the header ``Content-Length`` to match at most the total body length, otherwise some HTTP clients can hang indefinitely.
"""
def __init__(self, method, uri, body,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**headers):
self.method = method
self.uri = uri
self.info = None
self.request = None
self.body_is_callable = False
if hasattr(body, "__call__"):
self.callable_body = body
self.body = None
self.body_is_callable = True
elif isinstance(body, str):
self.body = utf8(body)
else:
self.body = body
self.streaming = streaming
if not streaming and not self.body_is_callable:
self.body_length = len(self.body or '')
else:
self.body_length = 0
self.adding_headers = adding_headers or {}
self.forcing_headers = forcing_headers or {}
self.status = int(status)
for k, v in headers.items():
name = "-".join(k.split("_")).title()
self.adding_headers[name] = v
self.validate()
def validate(self):
"""validates the body size with the value of the ``Content-Length``
header
"""
content_length_keys = 'Content-Length', 'content-length'
for key in content_length_keys:
got = self.adding_headers.get(
key, self.forcing_headers.get(key, None))
if got is None:
continue
igot = None
try:
igot = int(got)
except (ValueError, TypeError):
warnings.warn(
                    'HTTPretty was asked to register the Content-Length '
                    'header with "%r", which is not a number' % got)
return
if igot and igot > self.body_length:
raise HTTPrettyError(
'HTTPretty got inconsistent parameters. The header '
'Content-Length you registered expects size "%d" but '
                    'the body you registered actually has length '
'"%d".' % (
igot, self.body_length,
)
)
def __str__(self):
return r'<Entry {} {} getting {}>'.format(
self.method,
self.uri,
self.status
)
def normalize_headers(self, headers):
"""Normalize keys in header names so that ``COntent-tyPe`` becomes ``content-type``
:param headers: dict
:returns: dict
"""
new = {}
for k in headers:
new_k = '-'.join([s.lower() for s in k.split('-')])
new[new_k] = headers[k]
return new
def fill_filekind(self, fk):
"""writes HTTP Response data to a file descriptor
        :param fk: a file-like object
.. warning:: **side-effect:** this method moves the cursor of the given file object to zero
"""
now = datetime.utcnow()
headers = {
'status': self.status,
'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
'server': 'Python/HTTPretty',
'connection': 'close',
}
if self.forcing_headers:
headers = self.forcing_headers
if self.adding_headers:
headers.update(
self.normalize_headers(
self.adding_headers))
headers = self.normalize_headers(headers)
status = headers.get('status', self.status)
if self.body_is_callable:
status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
headers = self.normalize_headers(headers)
# TODO: document this behavior:
if 'content-length' not in headers:
headers.update({
'content-length': len(self.body)
})
string_list = [
'HTTP/1.1 %d %s' % (status, STATUSES[status]),
]
if 'date' in headers:
string_list.append('date: %s' % headers.pop('date'))
if not self.forcing_headers:
content_type = headers.pop('content-type',
'text/plain; charset=utf-8')
content_length = headers.pop('content-length',
self.body_length)
string_list.append('content-type: %s' % content_type)
if not self.streaming:
string_list.append('content-length: %s' % content_length)
server = headers.pop('server', None)
if server:
string_list.append('server: %s' % server)
for k, v in headers.items():
string_list.append(
'{}: {}'.format(k, v),
)
for item in string_list:
fk.write(utf8(item) + b'\n')
fk.write(b'\r\n')
if self.streaming:
self.body, body = itertools.tee(self.body)
for chunk in body:
fk.write(utf8(chunk))
else:
fk.write(utf8(self.body))
fk.seek(0)
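# Illustrative sketch (added for clarity, not part of the original module):
# render a plain Entry into an in-memory buffer the same way fill_filekind()
# feeds fakesock's read buffer. The URL below is a placeholder.
def _example_fill_filekind():
    entry = Entry('GET', 'https://example.com/', body='hello world')
    buf = io.BytesIO()
    entry.fill_filekind(buf)   # side effect: the cursor is rewound to 0
    # buf now holds a status line, headers and the body, e.g.
    # b'HTTP/1.1 200 OK\n...\r\nhello world'
    return buf.getvalue()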
def url_fix(s, charset=None):
"""escapes special characters
"""
if charset:
warnings.warn("{}.url_fix() charset argument is deprecated".format(__name__), DeprecationWarning)
scheme, netloc, path, querystring, fragment = urlsplit(s)
path = quote(path, b'/%')
querystring = quote_plus(querystring, b':&=')
return urlunsplit((scheme, netloc, path, querystring, fragment))
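# Illustrative sketch (added for clarity, not part of the original module):
# url_fix() quotes the path and query portions while leaving the scheme and
# netloc untouched; the URL below is a placeholder.
def _example_url_fix():
    fixed = url_fix('https://example.com/some path/?q=a b&x=1')
    return fixed  # e.g. 'https://example.com/some%20path/?q=a+b&x=1'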
class URIInfo(BaseClass):
"""Internal representation of `URIs <https://en.wikipedia.org/wiki/Uniform_Resource_Identifier>`_
.. tip:: all arguments are optional
:param username:
:param password:
:param hostname:
:param port:
:param path:
:param query:
:param fragment:
:param scheme:
:param last_request:
"""
default_str_attrs = (
'username',
'password',
'hostname',
'port',
'path',
)
def __init__(self,
username='',
password='',
hostname='',
port=80,
path='/',
query='',
fragment='',
scheme='',
last_request=None):
self.username = username or ''
self.password = password or ''
self.hostname = hostname or ''
if port:
port = int(port)
elif scheme == 'https':
port = 443
self.port = port or 80
self.path = path or ''
if query:
query_items = sorted(parse_qs(query).items())
self.query = urlencode(
encode_obj(query_items),
doseq=True,
)
else:
self.query = ''
if scheme:
self.scheme = scheme
elif self.port in POTENTIAL_HTTPS_PORTS:
self.scheme = 'https'
else:
self.scheme = 'http'
self.fragment = fragment or ''
self.last_request = last_request
def to_str(self, attrs):
fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
return r'<httpretty.URIInfo(%s)>' % fmt
def __str__(self):
return self.to_str(self.default_str_attrs)
def str_with_query(self):
attrs = self.default_str_attrs + ('query',)
return self.to_str(attrs)
def __hash__(self):
        return int(hashlib.sha1(bytes(str(self), 'ascii')).hexdigest(), 16)
def __eq__(self, other):
self_tuple = (
self.port,
decode_utf8(self.hostname.lower()),
url_fix(decode_utf8(self.path)),
)
other_tuple = (
other.port,
decode_utf8(other.hostname.lower()),
url_fix(decode_utf8(other.path)),
)
return self_tuple == other_tuple
def full_url(self, use_querystring=True):
"""
:param use_querystring: bool
:returns: a string with the full url with the format ``{scheme}://{credentials}{domain}{path}{query}``
"""
credentials = ""
if self.password:
credentials = "{}:{}@".format(
self.username, self.password)
query = ""
if use_querystring and self.query:
query = "?{}".format(decode_utf8(self.query))
result = "{scheme}://{credentials}{domain}{path}{query}".format(
scheme=self.scheme,
credentials=credentials,
domain=self.get_full_domain(),
path=decode_utf8(self.path),
query=query
)
return result
def get_full_domain(self):
"""
:returns: a string in the form ``{domain}:{port}`` or just the domain if the port is 80 or 443
"""
hostname = decode_utf8(self.hostname)
# Port 80/443 should not be appended to the url
if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
return ":".join([hostname, str(self.port)])
return hostname
@classmethod
def from_uri(cls, uri, entry):
"""
:param uri: string
:param entry: an instance of :py:class:`~httpretty.core.Entry`
"""
result = urlsplit(uri)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
return cls(result.username,
result.password,
result.hostname,
result.port,
result.path,
result.query,
result.fragment,
result.scheme,
entry)
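# Illustrative sketch (added for clarity, not part of the original module):
# URIInfo.from_uri() splits a URL into its parts and full_url() rebuilds it
# with a sorted, normalized querystring; the URL below is a placeholder.
def _example_uriinfo_round_trip():
    info = URIInfo.from_uri('https://user:secret@example.com:8443/api?b=2&a=1', entry=None)
    # non-default ports are kept by get_full_domain()
    return info.full_url()  # e.g. 'https://user:secret@example.com:8443/api?a=1&b=2'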
class URIMatcher(object):
regex = None
info = None
def __init__(self, uri, entries, match_querystring=False, priority=0):
self._match_querystring = match_querystring
# CPython, Jython
regex_types = ('SRE_Pattern', 'org.python.modules.sre.PatternObject',
'Pattern')
is_regex = type(uri).__name__ in regex_types
if is_regex:
self.regex = uri
result = urlsplit(uri.pattern)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
else:
self.info = URIInfo.from_uri(uri, entries)
self.entries = entries
self.priority = priority
self.uri = uri
# hash of current_entry pointers, per method.
self.current_entries = {}
def matches(self, info):
if self.info:
# Query string is not considered when comparing info objects, compare separately
return self.info == info and (not self._match_querystring or self.info.query == info.query)
else:
return self.regex.search(info.full_url(
use_querystring=self._match_querystring))
def __str__(self):
wrap = 'URLMatcher({})'
if self.info:
if self._match_querystring:
return wrap.format(str(self.info.str_with_query()))
else:
return wrap.format(str(self.info))
else:
return wrap.format(self.regex.pattern)
def get_next_entry(self, method, info, request):
"""Cycle through available responses, but only once.
Any subsequent requests will receive the last response"""
if method not in self.current_entries:
self.current_entries[method] = 0
# restrict selection to entries that match the requested
# method
entries_for_method = [e for e in self.entries if e.method == method]
if self.current_entries[method] >= len(entries_for_method):
self.current_entries[method] = -1
if not self.entries or not entries_for_method:
raise ValueError('I have no entries for method %s: %s'
% (method, self))
entry = entries_for_method[self.current_entries[method]]
if self.current_entries[method] != -1:
self.current_entries[method] += 1
# Create a copy of the original entry to make it thread-safe
body = entry.callable_body if entry.body_is_callable else entry.body
new_entry = Entry(entry.method, entry.uri, body,
status=entry.status,
streaming=entry.streaming,
adding_headers=entry.adding_headers,
forcing_headers=entry.forcing_headers)
# Attach more info to the entry
# So the callback can be more clever about what to do
# This does also fix the case where the callback
# would be handed a compiled regex as uri instead of the
# real uri
new_entry.info = info
new_entry.request = request
return new_entry
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
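# Illustrative sketch (added for clarity, not part of the original module):
# get_next_entry() hands out registered entries in order for a given method
# and then keeps repeating the last one; the URI below is a placeholder.
def _example_matcher_rotation():
    uri = 'https://example.com/resource'
    entries = [Entry('GET', uri, 'first'),
               Entry('GET', uri, 'second')]
    matcher = URIMatcher(uri, entries)
    info = URIInfo.from_uri(uri, entries[0])
    bodies = [matcher.get_next_entry('GET', info, request=None).body
              for _ in range(4)]
    return bodies  # expected: [b'first', b'second', b'second', b'second']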
class httpretty(HttpBaseClass):
"""manages HTTPretty's internal request/response registry and request matching.
"""
_entries = {}
latest_requests = []
last_request = HTTPrettyRequestEmpty()
_is_enabled = False
allow_net_connect = True
@classmethod
def match_uriinfo(cls, info):
"""
:param info: an :py:class:`~httpretty.core.URIInfo`
:returns: a 2-item tuple: (:py:class:`~httpretty.core.URLMatcher`, :py:class:`~httpretty.core.URIInfo`) or ``(None, [])``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.matches(info):
return (matcher, info)
return (None, [])
@classmethod
def match_https_hostname(cls, hostname):
"""
:param hostname: a string
:returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.info is None:
pattern_with_port = "https://{0}:".format(hostname)
pattern_without_port = "https://{0}/".format(hostname)
hostname_pattern = (
hostname_re
.match(matcher.regex.pattern)
.group(0)
)
for pattern in [pattern_with_port, pattern_without_port]:
if re.match(hostname_pattern, pattern):
return matcher
elif matcher.info.hostname == hostname:
return matcher
return None
@classmethod
def match_http_address(cls, hostname, port):
"""
:param hostname: a string
:param port: an integer
:returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.info is None:
if port in POTENTIAL_HTTPS_PORTS:
scheme = 'https://'
else:
scheme = 'http://'
pattern_without_port = "{0}{1}/".format(scheme, hostname)
pattern_with_port = "{0}{1}:{2}/".format(scheme, hostname, port)
hostname_pattern = (
hostname_re
.match(matcher.regex.pattern)
.group(0)
)
for pattern in [pattern_with_port, pattern_without_port]:
if re.match(hostname_pattern, pattern):
return matcher
elif matcher.info.hostname == hostname \
and matcher.info.port == port:
return matcher
return None
@classmethod
@contextlib.contextmanager
def record(cls, filename, indentation=4, encoding='utf-8', verbose=False, allow_net_connect=True, pool_manager_params=None):
"""
.. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
with io.open('/tmp/ip.json') as fd:
assert data == json.load(fd)
:param filename: a string
:param indentation: an integer, defaults to **4**
:param encoding: a string, defaults to **"utf-8"**
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
try:
import urllib3
except ImportError:
msg = (
'HTTPretty requires urllib3 installed '
'for recording actual requests.'
)
raise RuntimeError(msg)
http = urllib3.PoolManager(**pool_manager_params or {})
cls.enable(allow_net_connect, verbose=verbose)
calls = []
def record_request(request, uri, headers):
cls.disable()
kw = {}
kw.setdefault('body', request.body)
kw.setdefault('headers', dict(request.headers))
response = http.request(request.method, uri, **kw)
calls.append({
'request': {
'uri': uri,
'method': request.method,
'headers': dict(request.headers),
'body': decode_utf8(request.body),
'querystring': request.querystring
},
'response': {
'status': response.status,
'body': decode_utf8(response.data),
# urllib3 1.10 had a bug if you just did:
# dict(response.headers)
# which would cause all the values to become lists
# with the header name as the first item and the
# true value as the second item. Workaround that
'headers': dict(response.headers.items())
}
})
cls.enable(allow_net_connect, verbose=verbose)
return response.status, response.headers, response.data
for method in cls.METHODS:
cls.register_uri(method, MULTILINE_ANY_REGEX, body=record_request)
yield
cls.disable()
with codecs.open(filename, 'w', encoding) as f:
f.write(json.dumps(calls, indent=indentation))
@classmethod
@contextlib.contextmanager
def playback(cls, filename, allow_net_connect=True, verbose=False):
"""
.. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
           with io.open('/tmp/ip.json') as fd:
               assert data == json.load(fd)
           with httpretty.playback('/tmp/ip.json'):
               assert requests.get('https://httpbin.org/ip').json() == data
:param filename: a string
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
cls.enable(allow_net_connect, verbose=verbose)
data = json.loads(open(filename).read())
for item in data:
uri = item['request']['uri']
method = item['request']['method']
body = item['response']['body']
headers = item['response']['headers']
cls.register_uri(method, uri, body=body, forcing_headers=headers)
yield
cls.disable()
@classmethod
def reset(cls):
"""resets the internal state of HTTPretty, unregistering all URLs
"""
POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS)
POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS)
cls._entries.clear()
cls.latest_requests = []
cls.last_request = HTTPrettyRequestEmpty()
@classmethod
def historify_request(cls, headers, body='', sock=None):
"""appends request to a list for later retrieval
.. testcode::
           import requests
           import httpretty
           with httpretty.enabled():
               httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body='')
               requests.get('https://httpbin.org/ip')
               assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
"""
request = HTTPrettyRequest(headers, body, sock=sock)
cls.last_request = request
if request not in cls.latest_requests:
cls.latest_requests.append(request)
else:
pos = cls.latest_requests.index(request)
cls.latest_requests[pos] = request
logger.info("captured: {}".format(request))
return request
@classmethod
def register_uri(cls, method, uri, body='{"message": "HTTPretty :)"}',
adding_headers=None,
forcing_headers=None,
status=200,
responses=None,
match_querystring=False,
priority=0,
**headers):
"""
.. testcode::
           import json
           import requests
           import httpretty
def request_callback(request, uri, response_headers):
content_type = request.headers.get('Content-Type')
assert request.body == '{"nothing": "here"}', 'unexpected body: {}'.format(request.body)
assert content_type == 'application/json', 'expected application/json but received Content-Type: {}'.format(content_type)
return [200, response_headers, json.dumps({"hello": "world"})]
           with httpretty.enabled():
               httpretty.register_uri(
                   HTTPretty.POST, "https://httpretty.example.com/api",
                   body=request_callback)
               requests.post('https://httpretty.example.com/api', data='{"nothing": "here"}', headers={'Content-Type': 'application/json'})
               assert httpretty.latest_requests[-1].url == 'https://httpretty.example.com/api'
:param method: one of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``
:param uri: a string or regex pattern (e.g.: **"https://httpbin.org/ip"**)
:param body: a string, defaults to ``{"message": "HTTPretty :)"}``
:param adding_headers: dict - headers to be added to the response
:param forcing_headers: dict - headers to be forcefully set in the response
:param status: an integer, defaults to **200**
:param responses: a list of entries, ideally each created with :py:meth:`~httpretty.core.httpretty.Response`
:param priority: an integer, useful for setting higher priority over previously registered urls. defaults to zero
:param match_querystring: bool - whether to take the querystring into account when matching an URL
:param headers: headers to be added to the response
.. warning:: When using a port in the request, add a trailing slash if no path is provided otherwise Httpretty will not catch the request. Ex: ``httpretty.register_uri(httpretty.GET, 'http://fakeuri.com:8080/', body='{"hello":"world"}')``
"""
uri_is_string = isinstance(uri, str)
if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}(:[0-9]+)?$', uri):
uri += '/'
if isinstance(responses, list) and len(responses) > 0:
for response in responses:
response.uri = uri
response.method = method
entries_for_this_uri = responses
else:
headers['body'] = body
headers['adding_headers'] = adding_headers
headers['forcing_headers'] = forcing_headers
headers['status'] = status
entries_for_this_uri = [
cls.Response(method=method, uri=uri, **headers),
]
matcher = URIMatcher(uri, entries_for_this_uri,
match_querystring, priority)
if matcher in cls._entries:
matcher.entries.extend(cls._entries[matcher])
del cls._entries[matcher]
cls._entries[matcher] = entries_for_this_uri
def __str__(self):
return '<HTTPretty with %d URI entries>' % len(self._entries)
@classmethod
def Response(
cls, body,
method=None,
uri=None,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**kw):
"""Shortcut to create an :py:class:`~httpretty.core.Entry` that takes
the body as first positional argument.
.. seealso:: the parameters of this function match those of
the :py:class:`~httpretty.core.Entry` constructor.
Args:
            body (str): The body to return as the response.
method (str): One of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``.
uri (str|re.Pattern): The URL to match
adding_headers (dict): Extra headers to be added to the response
forcing_headers (dict): Overwrite **any** response headers, even "Content-Length".
status (int): The status code for the response, defaults to ``200``.
            streaming (bool): Whether to stream the response in chunks via a generator.
kwargs: Keyword-arguments are forwarded to :py:class:`~httpretty.core.Entry`
Returns:
httpretty.Entry: containing the request-matching metadata.
"""
kw['body'] = body
kw['adding_headers'] = adding_headers
kw['forcing_headers'] = forcing_headers
kw['status'] = int(status)
kw['streaming'] = streaming
return Entry(method, uri, **kw)
@classmethod
def disable(cls):
"""Disables HTTPretty entirely, putting the original :py:mod:`socket`
module back in its place.
.. code::
import re, json
import httpretty
httpretty.enable()
# request passes through fake socket
response = requests.get('https://httpbin.org')
httpretty.disable()
# request uses real python socket module
response = requests.get('https://httpbin.org')
.. note:: This method does not call :py:meth:`httpretty.core.reset` automatically.
"""
undo_patch_socket()
cls._is_enabled = False
@classmethod
def is_enabled(cls):
"""Check if HTTPretty is enabled
:returns: bool
.. testcode::
import httpretty
httpretty.enable()
assert httpretty.is_enabled() == True
httpretty.disable()
assert httpretty.is_enabled() == False
"""
return cls._is_enabled
@classmethod
def enable(cls, allow_net_connect=True, verbose=False):
"""Enables HTTPretty.
:param allow_net_connect: boolean to determine if unmatched requests are forwarded to a real network connection OR throw :py:class:`httpretty.errors.UnmockedError`.
:param verbose: boolean to set HTTPretty's logging level to DEBUG
.. testcode::
import re, json
import httpretty
httpretty.enable(allow_net_connect=True, verbose=True)
httpretty.register_uri(
httpretty.GET,
re.compile(r'http://.*'),
body=json.dumps({'man': 'in', 'the': 'middle'})
)
response = requests.get('https://foo.bar/foo/bar')
response.json().should.equal({
"man": "in",
"the": "middle",
})
.. warning:: after calling this method the original :py:mod:`socket` is replaced with :py:class:`httpretty.core.fakesock`. Make sure to call :py:meth:`~httpretty.disable` after done with your tests or use the :py:class:`httpretty.enabled` as decorator or `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
httpretty.allow_net_connect = allow_net_connect
apply_patch_socket()
cls._is_enabled = True
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.getLogger().level)
def apply_patch_socket():
# Some versions of python internally shadowed the
# SocketType variable incorrectly https://bugs.python.org/issue20386
bad_socket_shadow = (socket.socket != socket.SocketType)
new_wrap = None
socket.socket = fakesock.socket
socket.socketpair = fake_socketpair
socket._socketobject = fakesock.socket
if not bad_socket_shadow:
socket.SocketType = fakesock.socket
socket.create_connection = create_fake_connection
socket.gethostname = fake_gethostname
socket.gethostbyname = fake_gethostbyname
socket.getaddrinfo = fake_getaddrinfo
socket.__dict__['socket'] = fakesock.socket
socket.__dict__['socketpair'] = fake_socketpair
socket.__dict__['_socketobject'] = fakesock.socket
if not bad_socket_shadow:
socket.__dict__['SocketType'] = fakesock.socket
socket.__dict__['create_connection'] = create_fake_connection
socket.__dict__['gethostname'] = fake_gethostname
socket.__dict__['gethostbyname'] = fake_gethostbyname
socket.__dict__['getaddrinfo'] = fake_getaddrinfo
# Take out the pyopenssl version - use the default implementation
for extract_from_urllib3 in pyopenssl_overrides_extract:
        extract_from_urllib3()
if requests_urllib3_connection is not None:
urllib3_wrap = partial(fake_wrap_socket, old_requests_ssl_wrap_socket)
requests_urllib3_connection.ssl_wrap_socket = urllib3_wrap
requests_urllib3_connection.__dict__['ssl_wrap_socket'] = urllib3_wrap
if eventlet:
eventlet.green.ssl.GreenSSLContext = old_sslcontext_class
eventlet.green.ssl.__dict__['GreenSSLContext'] = old_sslcontext_class
eventlet.green.ssl.SSLContext = old_sslcontext_class
eventlet.green.ssl.__dict__['SSLContext'] = old_sslcontext_class
if socks:
socks.socksocket = fakesock.socket
socks.__dict__['socksocket'] = fakesock.socket
if ssl:
new_wrap = partial(fake_wrap_socket, old_ssl_wrap_socket)
ssl.wrap_socket = new_wrap
ssl.SSLSocket = FakeSSLSocket
ssl.SSLContext = old_sslcontext_class
try:
ssl.SSLContext.wrap_socket = partial(fake_wrap_socket, old_ssl_wrap_socket)
except AttributeError:
pass
ssl.__dict__['wrap_socket'] = new_wrap
ssl.__dict__['SSLSocket'] = FakeSSLSocket
ssl.__dict__['SSLContext'] = old_sslcontext_class
def undo_patch_socket():
socket.socket = old_socket
socket.socketpair = old_socketpair
socket.SocketType = old_SocketType
socket._socketobject = old_socket
socket.create_connection = old_create_connection
socket.gethostname = old_gethostname
socket.gethostbyname = old_gethostbyname
socket.getaddrinfo = old_getaddrinfo
socket.__dict__['socket'] = old_socket
socket.__dict__['socketpair'] = old_socketpair
socket.__dict__['_socketobject'] = old_socket
socket.__dict__['SocketType'] = old_SocketType
socket.__dict__['create_connection'] = old_create_connection
socket.__dict__['gethostname'] = old_gethostname
socket.__dict__['gethostbyname'] = old_gethostbyname
socket.__dict__['getaddrinfo'] = old_getaddrinfo
if socks:
socks.socksocket = old_socksocket
socks.__dict__['socksocket'] = old_socksocket
if ssl:
ssl.wrap_socket = old_ssl_wrap_socket
ssl.SSLSocket = old_sslsocket
try:
ssl.SSLContext.wrap_socket = old_sslcontext_wrap_socket
except AttributeError:
pass
ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
ssl.__dict__['SSLSocket'] = old_sslsocket
if requests_urllib3_connection is not None:
requests_urllib3_connection.ssl_wrap_socket = \
old_requests_ssl_wrap_socket
requests_urllib3_connection.__dict__['ssl_wrap_socket'] = \
old_requests_ssl_wrap_socket
# Put the pyopenssl version back in place
for inject_from_urllib3 in pyopenssl_overrides_inject:
        inject_from_urllib3()
@contextlib.contextmanager
def restored_libs():
undo_patch_socket()
yield
apply_patch_socket()
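# Illustrative sketch (added for clarity, not part of the original module):
# restored_libs() temporarily puts the real socket/ssl modules back while
# HTTPretty stays enabled, then re-applies the patches on exit.
def _example_restored_libs():
    httpretty.enable(allow_net_connect=False)
    try:
        with restored_libs():
            pass  # network calls made here would use the real socket module
    finally:
        httpretty.disable()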
class httprettized(object):
"""`context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_ for enabling HTTPretty.
.. tip:: Also available under the alias :py:func:`httpretty.enabled`
.. testcode::
       import json
       import requests
       import httpretty
       with httpretty.enabled():
           httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body=json.dumps({'origin': '42.42.42.42'}))
           response = requests.get('https://httpbin.org/ip')
           assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
           assert response.json() == {'origin': '42.42.42.42'}
"""
def __init__(self, allow_net_connect=True, verbose=False):
self.allow_net_connect = allow_net_connect
self.verbose = verbose
def __enter__(self):
httpretty.reset()
httpretty.enable(allow_net_connect=self.allow_net_connect, verbose=self.verbose)
def __exit__(self, exc_type, exc_value, db):
httpretty.disable()
httpretty.reset()
def httprettified(test=None, allow_net_connect=True, verbose=False):
"""decorator for test functions
.. tip:: Also available under the alias :py:func:`httpretty.activate`
:param test: a callable
example usage with `nosetests <https://nose.readthedocs.io/en/latest/>`_
.. testcode::
import sure
from httpretty import httprettified
@httprettified
def test_using_nosetests():
httpretty.register_uri(
httpretty.GET,
'https://httpbin.org/ip'
)
response = requests.get('https://httpbin.org/ip')
response.json().should.equal({
"message": "HTTPretty :)"
})
example usage with `unittest module <https://docs.python.org/3/library/unittest.html>`_
.. testcode::
import unittest
from sure import expect
from httpretty import httprettified
@httprettified
class TestWithPyUnit(unittest.TestCase):
def test_httpbin(self):
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip')
response = requests.get('https://httpbin.org/ip')
expect(response.json()).to.equal({
"message": "HTTPretty :)"
})
"""
def decorate_unittest_TestCase_setUp(klass):
# Prefer addCleanup (added in python 2.7), but fall back
# to using tearDown if it isn't available
use_addCleanup = hasattr(klass, 'addCleanup')
original_setUp = (klass.setUp
if hasattr(klass, 'setUp')
else None)
def new_setUp(self):
httpretty.reset()
httpretty.enable(allow_net_connect, verbose=verbose)
if use_addCleanup:
self.addCleanup(httpretty.disable)
if original_setUp:
original_setUp(self)
klass.setUp = new_setUp
if not use_addCleanup:
            original_tearDown = (klass.tearDown
if hasattr(klass, 'tearDown')
else None)
def new_tearDown(self):
httpretty.disable()
httpretty.reset()
if original_tearDown:
original_tearDown(self)
klass.tearDown = new_tearDown
return klass
def decorate_test_methods(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def is_unittest_TestCase(klass):
try:
import unittest
return issubclass(klass, unittest.TestCase)
except ImportError:
return False
def decorate_class(klass):
if is_unittest_TestCase(klass):
return decorate_unittest_TestCase_setUp(klass)
return decorate_test_methods(klass)
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
            with httprettized(allow_net_connect, verbose=verbose):
return test(*args, **kw)
return wrapper
if isinstance(test, type):
return decorate_class(test)
elif callable(test):
return decorate_callable(test)
return decorate_callable
|
app.py
|
import logging
import threading
import weakref
from logging import getLogger, NullHandler
from .helper.iter_utils import align_iterables
from .root import Root
logger = getLogger(__name__)
logger.addHandler(NullHandler())
class App:
def __init__(self):
self.root_widget = Root
self.root_view = None
self.__stopped = False
self.is_rendering = False
self.__need_update = False
self.view_constructor = None
self.thread = threading.Thread(target=self.lifecycle)
def __enter__(self):
return self
def __exit__(self, error_type, value, traceback):
pass
def __str__(self):
return "App"
def body(self, view_constructor):
self.view_constructor = view_constructor
def render(self):
self.is_rendering = True
with self.root_widget() as new_tree:
self.view_constructor()
App.update_tree(self.root_view, new_tree, root=self.root_widget)
logger.info("finished updating view tree")
logger.debug("###OLD TREE" + "#"*18)
if self.root_view is not None:
self.root_view.dump_tree()
else:
logger.debug("(None)")
logger.debug("###NEW TREE" + "#"*18)
if new_tree is not None:
new_tree.dump_tree()
else:
logger.debug("(None)")
logger.debug("#"*30)
if self.root_view is not None:
self.root_view.remove()
logger.info("finish remove old tree")
self.root_view = new_tree
self.root_view.render()
logger.info("finish render tree")
self.is_rendering = False
self.need_update = False
@classmethod
def update_tree(cls, old_tree, new_tree, root=Root):
if new_tree is None:
return
if (old_tree is None
or new_tree.widget_type is not old_tree.widget_type):
logger.debug("building new tree")
if old_tree is not None:
logger.debug("changed widget type {} -> {}".format(old_tree.widget_type, new_tree.widget_type))
new_tree.build(root=root)
logger.debug("finish build tree")
return
new_tree.widget = old_tree.widget
new_tree.widget.owner_view = weakref.ref(new_tree)
if new_tree.hashcode != old_tree.hashcode:
logger.debug("update widget parameters")
new_tree.widget.update(*new_tree.args, **new_tree.kwargs)
new_tree.need_update = True
for old_subtree, new_subtree in align_iterables(old_tree.children, new_tree.children, key='id'):
App.update_tree(old_subtree, new_subtree, root=root)
@property
def need_update(self):
        return bool(self.__need_update)
@need_update.setter
def need_update(self, value):
self.__need_update = value
def start(self):
self.thread.start()
def lifecycle(self):
while not self.__stopped:
if self.need_update:
self.pre_render()
logger.debug("render start from lifecycle")
self.render()
self.post_render()
self.before_exit()
return
def pre_render(self):
pass
def post_render(self):
pass
def before_exit(self):
pass
def stop(self):
self.__stopped = True
self.thread.join()
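# Illustrative sketch (added for clarity, not part of this module): update_tree
# pairs old and new children via align_iterables(..., key='id'). The helper
# below is a hypothetical stand-in written only to show the kind of pairing
# the diff relies on; the real helper in .helper.iter_utils may differ.
def _example_align_by_id(old_children, new_children, key='id'):
    old_by_key = {getattr(child, key, None): child for child in old_children}
    for new_child in new_children:
        # (old, new) when an old child with the same id exists,
        # (None, new) otherwise, so update_tree builds a fresh subtree
        yield old_by_key.get(getattr(new_child, key, None)), new_child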
|
service.py
|
import os
import re
import sys
import json
import time
import click
import psutil
import importlib
import traceback
import threading
import subprocess
import watchdog
import anchore_engine.configuration.localconfig
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
from anchore_engine.subsys import logger
import anchore_engine.db.entities.common
from anchore_engine.db.entities.exceptions import TableNotFoundError
from anchore_engine.db.entities.exceptions import is_table_not_found
import anchore_manager.cli.utils
service_map = {
'analyzer': 'anchore-worker',
'simplequeue': 'anchore-simplequeue',
'apiext': 'anchore-api',
'catalog': 'anchore-catalog',
'kubernetes_webhook': 'anchore-kubernetes-webhook',
'policy_engine': 'anchore-policy-engine'
}
class AnchoreLogWatcher(RegexMatchingEventHandler):
    regexes = [re.compile(r".*/anchore-.*\.log$")]
files = {}
def do_close(self, event):
if event.src_path in self.files and self.files[event.src_path]['filehandle']:
self.files[event.src_path]['filehandle'].close()
self.files[event.src_path] = {'filehandle': None, 'filetell': 0}
def on_deleted(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {'filehandle': None, 'filetell': 0}
self.do_close(event)
def on_modified(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {'filehandle': None, 'filetell': 0}
if not self.files[event.src_path]['filehandle']:
if os.path.exists(event.src_path):
self.files[event.src_path]['filehandle'] = open(event.src_path)
if self.files[event.src_path]['filehandle']:
            patt = re.match(r".*anchore-(.*)\.log$", event.src_path)
if patt:
logname = patt.group(1)
else:
logname = event.src_path
for line in self.files[event.src_path]['filehandle'].readlines():
sys.stdout.write("[service:" + str(logname) + "] " + line)
self.files[event.src_path]['filetell'] = self.files[event.src_path]['filehandle'].tell()
def on_created(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {'filehandle': None, 'filetell': 0}
if self.files[event.src_path]['filehandle']:
self.do_close(event)
if os.path.exists(event.src_path):
self.files[event.src_path]['filehandle'] = open(event.src_path)
self.files[event.src_path]['filetell'] = 0
def on_moved(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {'filehandle': None, 'filetell': 0}
self.on_created(event)
def on_any_event(self, event):
if event.src_path not in self.files:
self.files[event.src_path] = {'filehandle': None, 'filetell': 0}
class ServiceThread():
def __init__(self, thread_target, thread_args):
self.thread_target = thread_target
self.thread_args = thread_args
self.start()
def start(self):
self.thread = threading.Thread(target=self.thread_target, args=self.thread_args)
self.thread.name = self.thread_args[0]
self.thread.start()
def terminate_service(service, flush_pidfile=False):
pidfile = "/var/run/" + service + ".pid"
try:
logger.info("Looking for pre-existing service ({}) pid from pidfile ({})".format(service, pidfile))
thepid = None
if os.path.exists(pidfile):
with open(pidfile, 'r') as FH:
thepid = int(FH.read())
if thepid:
# get some additional information about the pid to determine whether or not to run the kill operations
thepid_is_theservice = False
try:
running_pid = psutil.Process(thepid)
cmdline = running_pid.cmdline()
if pidfile in cmdline:
thepid_is_theservice = True
logger.info("Found existing service ({}) running with pid ({})".format(service, thepid))
else:
logger.info("Found pid running but belongs to unrelated process - skipping terminate")
except Exception as err:
thepid_is_theservice = False
if thepid_is_theservice:
try:
logger.info("Terminating existing service ({}) with pid ({}) using signal 0".format(service, thepid))
os.kill(thepid, 0)
except OSError:
pass
else:
logger.info("Terminating existing service ({}) with pid ({}) using signal 9".format(service, thepid))
os.kill(thepid, 9)
if flush_pidfile:
logger.info("Removing stale pidfile ({}) for service ({})".format(pidfile, service))
os.remove(pidfile)
except Exception as err:
logger.info("Could not detect/shut down running service ({}) - exception: {}".format(service, str(err)))
def startup_service(service, configdir):
pidfile = "/var/run/" + service + ".pid"
logfile = "/var/log/anchore/" + service + ".log"
# os.environ['ANCHORE_LOGFILE'] = logfile
logger.info("cleaning up service: {}".format(str(service)))
terminate_service(service, flush_pidfile=True)
twistd_cmd = '/bin/twistd'
for f in ['/bin/twistd', '/usr/local/bin/twistd']:
if os.path.exists(f):
twistd_cmd = f
cmd = [twistd_cmd, '--logger=anchore_engine.subsys.twistd_logger.logger', '--pidfile', pidfile, "-n", service, '--config', configdir]
logger.info("starting service: {}".format(str(service)))
logger.info("\t {}".format(' '.join(cmd)))
try:
newenv = os.environ.copy()
newenv['ANCHORE_LOGFILE'] = logfile
pipes = subprocess.Popen(cmd, env=newenv)
sout, serr = pipes.communicate()
rc = pipes.returncode
raise Exception("process exited: " + str(rc))
except Exception as err:
logger.exception("service process exited at ({}): {}".format(str(time.ctime()), str(err)))
logger.fatal('Could not start service due to: {}'.format(str(err)))
logger.info("exiting service thread")
    return False
config = {}
module = None
@click.group(name='service', short_help='Service operations')
@click.pass_obj
def service(ctx_config):
global config, module
config = ctx_config
try:
# do some DB connection/pre-checks here
try:
log_level = 'INFO'
if config['debug']:
log_level = 'DEBUG'
logger.set_log_level(log_level, log_to_stdout=True)
except Exception as err:
raise err
except Exception as err:
logger.error(anchore_manager.cli.utils.format_error_output(config, 'service', {}, err))
sys.exit(2)
@service.command(name='list', short_help="List valid service names")
@click.option('--anchore-module', help='Module to list services for', default='anchore_engine')
def do_list(anchore_module):
click.echo('Locally installed and available service types:')
from anchore_engine.service import BaseService
# Expects a services module within the base module
importlib.import_module(anchore_module + '.services')
for name in BaseService.registry.keys():
click.echo(name)
anchore_manager.cli.utils.doexit(0)
return
@service.command(name='start', short_help="Start anchore-engine")
@click.argument('services', nargs=-1)
@click.option("--auto-upgrade", is_flag=True, help="Perform automatic upgrade on startup")
@click.option("--anchore-module", nargs=1, help="Name of anchore module to call DB routines from (default=anchore_engine)")
@click.option("--skip-config-validate", nargs=1, help="Comma-separated list of configuration file sections to skip specific validation processing (e.g. services,credentials,webhooks)")
@click.option("--skip-db-compat-check", is_flag=True, help="Skip the database compatibility check.")
@click.option("--all", is_flag=True, default=False)
def start(services, auto_upgrade, anchore_module, skip_config_validate, skip_db_compat_check, all):
"""
Startup and monitor service processes. Specify a list of service names or empty for all.
"""
global config
ecode = 0
auto_upgrade = True
if not anchore_module:
module_name = "anchore_engine"
else:
module_name = str(anchore_module)
if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK', str(skip_db_compat_check)).lower() in ['true', 't', 'y', 'yes']:
skip_db_compat_check = True
else:
skip_db_compat_check = False
if services:
input_services = list(services)
else:
input_services = os.getenv('ANCHORE_ENGINE_SERVICES', '').strip().split()
if not input_services and not all:
raise click.exceptions.BadArgumentUsage('No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option')
try:
validate_params = {
'services': True,
'webhooks': True,
'credentials': True
}
if skip_config_validate:
try:
items = skip_config_validate.split(',')
for item in items:
validate_params[item] = False
except Exception as err:
raise Exception(err)
# find/set up configuration
configdir = config['configdir']
configfile = os.path.join(configdir, "config.yaml")
localconfig = None
if os.path.exists(configfile):
try:
localconfig = anchore_engine.configuration.localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
except Exception as err:
raise Exception("cannot load local configuration: " + str(err))
else:
raise Exception("cannot locate configuration file ({})".format(configfile))
# load the appropriate DB module
try:
logger.info("Loading DB routines from module ({})".format(module_name))
module = importlib.import_module(module_name + ".db.entities.upgrade")
except TableNotFoundError as ex:
logger.info("Initialized DB not found.")
except Exception as err:
raise Exception("Input anchore-module (" + str(module_name) + ") cannot be found/imported - exception: " + str(err))
# get the list of local services to start
startFailed = False
if not input_services:
config_services = localconfig.get('services', {})
if not config_services:
logger.warn('could not find any services to execute in the config file')
sys.exit(1)
input_services = [ name for name, srv_conf in list(config_services.items()) if srv_conf.get('enabled')]
services = []
for service_conf_name in input_services:
if service_conf_name in list(service_map.values()):
svc = service_conf_name
else:
svc = service_map.get(service_conf_name)
if svc:
services.append(svc)
else:
logger.warn('specified service {} not found in list of available services {} - removing from list of services to start'.format(service_conf_name, list(service_map.keys())))
if 'anchore-catalog' in services:
services.remove('anchore-catalog')
services.insert(0, 'anchore-catalog')
if not services:
logger.error("No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting")
sys.exit(1)
# preflight - db checks
try:
db_params = anchore_engine.db.entities.common.get_params(localconfig)
#override db_timeout since upgrade might require longer db session timeout setting
try:
db_params['db_connect_args']['timeout'] = 86400
except Exception as err:
pass
anchore_manager.cli.utils.connect_database(config, db_params, db_retries=300)
code_versions, db_versions = anchore_manager.cli.utils.init_database(upgrade_module=module, localconfig=localconfig, do_db_compatibility_check=(not skip_db_compat_check))
in_sync = False
timed_out = False
max_timeout = 3600
timer = time.time()
while not in_sync and not timed_out:
code_versions, db_versions = module.get_versions()
if code_versions and db_versions:
if code_versions['db_version'] != db_versions['db_version']:
if auto_upgrade and 'anchore-catalog' in services:
logger.info("Auto-upgrade is set - performing upgrade.")
try:
# perform the upgrade logic here
rc = module.run_upgrade()
if rc:
logger.info("Upgrade completed")
else:
logger.info("No upgrade necessary. Completed.")
except Exception as err:
raise err
in_sync = True
else:
logger.warn("this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds.".format(str(code_versions['db_version']), str(db_versions['db_version']), str(max_timeout - int(time.time() - timer))))
time.sleep(5)
else:
logger.info("DB version and code version in sync.")
in_sync = True
else:
logger.warn('no existing anchore DB data can be discovered, assuming bootstrap')
in_sync = True
if (max_timeout - int(time.time() - timer)) < 0:
timed_out = True
if not in_sync:
raise Exception("this version of anchore-engine requires the anchore DB version ("+str(code_versions['db_version'])+") but we discovered anchore DB version ("+str(db_versions['db_version'])+") in the running DB - please perform the DB upgrade process and retry")
except Exception as err:
raise err
finally:
rc = anchore_engine.db.entities.common.do_disconnect()
# start up services
logger.info('Starting services: {}'.format(services))
try:
if not os.path.exists("/var/log/anchore"):
os.makedirs("/var/log/anchore/", 0o755)
except Exception as err:
logger.error("cannot create log directory /var/log/anchore - exception: {}".format(str(err)))
raise err
pids = []
keepalive_threads = []
for service in services:
pidfile = "/var/run/" + service + ".pid"
try:
terminate_service(service, flush_pidfile=True)
service_thread = ServiceThread(startup_service, (service, configdir))
keepalive_threads.append(service_thread)
max_tries = 30
tries = 0
alive = True
while not os.path.exists(pidfile) and tries < max_tries:
logger.info("waiting for service pidfile {} to exist {}/{}".format(pidfile, tries, max_tries))
try:
alive = service_thread.thread.is_alive()
except:
pass
if not alive:
logger.info("service thread has stopped {}".format(service))
break
time.sleep(1)
tries = tries + 1
logger.info("auto_restart_services setting: {}".format(localconfig.get('auto_restart_services', False)))
if not localconfig.get('auto_restart_services', False):
logger.info("checking for startup failure pidfile={}, is_alive={}".format(os.path.exists(pidfile), alive))
if not os.path.exists(pidfile) or not alive:
raise Exception("service thread for ({}) failed to start".format(service))
time.sleep(1)
except Exception as err:
startFailed = True
logger.warn("service start failed - exception: {}".format(str(err)))
break
if startFailed:
logger.fatal("one or more services failed to start. cleanly terminating the others")
for service in services:
terminate_service(service, flush_pidfile=True)
sys.exit(1)
else:
# start up the log watchers
try:
observer = Observer()
observer.schedule(AnchoreLogWatcher(), path="/var/log/anchore/")
observer.start()
try:
while True:
time.sleep(1)
if localconfig.get('auto_restart_services', False): #'auto_restart_services' in localconfig and localconfig['auto_restart_services']:
for service_thread in keepalive_threads:
if not service_thread.thread.is_alive():
logger.info("restarting service: {}".format(service_thread.thread.name))
service_thread.start()
except KeyboardInterrupt:
observer.stop()
observer.join()
except Exception as err:
logger.error("failed to startup log watchers - exception: {}".format(str(err)))
raise err
except Exception as err:
logger.error(anchore_manager.cli.utils.format_error_output(config, 'servicestart', {}, err))
if not ecode:
ecode = 2
anchore_manager.cli.utils.doexit(ecode)
|
conftest.py
|
import array
import functools
import logging
import os
import pytest
import signal
import subprocess
import sys
import threading
import time
import uuid
from types import SimpleNamespace
import caproto as ca
import caproto.benchmarking # noqa
from caproto.sync.client import read
import caproto.threading # noqa
import caproto.asyncio # noqa
_repeater_process = None
REPEATER_PORT = 5065
SERVER_HOST = '0.0.0.0'
# make the logs noisy
logger = logging.getLogger('caproto')
logger.setLevel('DEBUG')
# except for the broadcaster
bcast_logger = logging.getLogger('caproto.bcast')
bcast_logger.setLevel('INFO')
array_types = (array.array,)
try:
import numpy
except ImportError:
pass
else:
array_types = array_types + (numpy.ndarray,)
# Don't import these from numpy because we do not assume that numpy is
# installed.
def assert_array_equal(arr1, arr2):
assert len(arr1) == len(arr2)
for i, j in zip(arr1, arr2):
assert i == j
def assert_array_almost_equal(arr1, arr2):
assert len(arr1) == len(arr2)
for i, j in zip(arr1, arr2):
assert abs(i - j) < 1e-6
def run_example_ioc(module_name, *, request, pv_to_check, args=None,
stdin=None, stdout=None, stderr=None, very_verbose=True):
'''Run an example IOC by module name as a subprocess
Parameters
----------
module_name : str
request : pytest request
pv_to_check : str
args : list, optional
'''
if args is None:
args = []
if module_name == '--script':
logger.debug(f'Running script {args}')
else:
logger.debug(f'Running {module_name}')
if '-vvv' not in args and very_verbose:
args = list(args) + ['-vvv']
os.environ['COVERAGE_PROCESS_START'] = '.coveragerc'
p = subprocess.Popen([sys.executable, '-um', 'caproto.tests.example_runner',
module_name] + list(args),
stdout=stdout, stderr=stderr, stdin=stdin,
env=os.environ)
def stop_ioc():
if p.poll() is None:
if sys.platform != 'win32':
logger.debug('Sending Ctrl-C to the example IOC')
p.send_signal(signal.SIGINT)
logger.debug('Waiting on process...')
try:
p.wait(timeout=1)
except subprocess.TimeoutExpired:
logger.debug('IOC did not exit in a timely fashion')
p.terminate()
logger.debug('IOC terminated')
else:
logger.debug('IOC has exited')
else:
logger.debug('Example IOC has already exited')
if request is not None:
request.addfinalizer(stop_ioc)
if pv_to_check:
looks_like_areadetector = 'areadetector' in module_name
if looks_like_areadetector:
poll_timeout, poll_attempts = 5.0, 5
else:
poll_timeout, poll_attempts = 1.0, 5
poll_readiness(pv_to_check, timeout=poll_timeout,
attempts=poll_attempts)
return p
def poll_readiness(pv_to_check, attempts=5, timeout=1):
logger.debug(f'Checking PV {pv_to_check}')
start_repeater()
for attempt in range(attempts):
try:
read(pv_to_check, timeout=timeout, repeater=False)
except (TimeoutError, ConnectionRefusedError):
continue
else:
break
else:
raise TimeoutError(f"ioc fixture failed to start in "
f"{attempts * timeout} seconds (pv: {pv_to_check})")
def run_softioc(request, db, additional_db=None, **kwargs):
db_text = ca.benchmarking.make_database(db)
if additional_db is not None:
db_text = '\n'.join((db_text, additional_db))
err = None
for attempt in range(3):
ioc_handler = ca.benchmarking.IocHandler()
ioc_handler.setup_ioc(db_text=db_text, max_array_bytes='10000000',
**kwargs)
request.addfinalizer(ioc_handler.teardown)
(pv_to_check, _), *_ = db
try:
poll_readiness(pv_to_check)
except TimeoutError as err_:
err = err_
else:
return ioc_handler
else:
# ran out of retry attempts
raise err
@pytest.fixture(scope='function')
def prefix():
'Random PV prefix for a server'
return str(uuid.uuid4())[:8] + ':'
def _epics_base_ioc(prefix, request):
name = 'Waveform and standard record IOC'
db = {
('{}waveform'.format(prefix), 'waveform'):
dict(FTVL='LONG', NELM=4000),
('{}float'.format(prefix), 'ai'): dict(VAL=3.14),
('{}enum'.format(prefix), 'bi'):
dict(VAL=1, ZNAM="zero", ONAM="one"),
('{}str'.format(prefix), 'stringout'): dict(VAL='test'),
('{}int'.format(prefix), 'longout'): dict(VAL=1),
('{}int2'.format(prefix), 'longout'): dict(VAL=1),
('{}int3'.format(prefix), 'longout'): dict(VAL=1),
}
macros = {'P': prefix}
handler = run_softioc(request, db,
additional_db=ca.benchmarking.PYEPICS_TEST_DB,
macros=macros)
process = handler.processes[-1]
exit_lock = threading.RLock()
monitor_output = []
def ioc_monitor():
process.wait()
with exit_lock:
monitor_output.extend([
f'***********************************',
f'********IOC process exited!********',
f'******* Returned: {process.returncode} ******',
                f'***********************************',
])
stdout, stderr = process.communicate()
if process.returncode != 0:
if stdout is not None:
lines = [f'[Server-stdout] {line}'
for line in stdout.decode('latin-1').split('\n')]
monitor_output.extend(lines)
if stderr is not None:
lines = [f'[Server-stderr] {line}'
                             for line in stderr.decode('latin-1').split('\n')]
monitor_output.extend(lines)
def ioc_monitor_output():
with exit_lock:
if monitor_output:
logger.debug('IOC monitor output:')
for line in monitor_output:
logger.debug(line)
request.addfinalizer(ioc_monitor_output)
threading.Thread(target=ioc_monitor).start()
pvs = {pv[len(prefix):]: pv
for pv, rtype in db
}
return SimpleNamespace(process=process, prefix=prefix, name=name, pvs=pvs,
type='epics-base')
def _caproto_ioc(prefix, request):
name = 'Caproto type varieties example'
pvs = dict(int=prefix + 'int',
int2=prefix + 'int2',
int3=prefix + 'int3',
float=prefix + 'pi',
str=prefix + 'str',
enum=prefix + 'enum',
waveform=prefix + 'waveform',
chararray=prefix + 'chararray',
empty_string=prefix + 'empty_string',
empty_bytes=prefix + 'empty_bytes',
empty_char=prefix + 'empty_char',
empty_float=prefix + 'empty_float',
)
process = run_example_ioc('caproto.ioc_examples.type_varieties',
request=request,
pv_to_check=pvs['float'],
args=('--prefix', prefix,))
return SimpleNamespace(process=process, prefix=prefix, name=name, pvs=pvs,
type='caproto')
caproto_ioc = pytest.fixture(scope='function')(_caproto_ioc)
epics_base_ioc = pytest.fixture(scope='function')(_epics_base_ioc)
@pytest.fixture(params=['caproto', 'epics-base'], scope='function')
def ioc_factory(prefix, request):
'A fixture that runs more than one IOC: caproto, epics'
# Get a new prefix for each IOC type:
if request.param == 'caproto':
return functools.partial(_caproto_ioc, prefix, request)
elif request.param == 'epics-base':
return functools.partial(_epics_base_ioc, prefix, request)
@pytest.fixture(params=['caproto', 'epics-base'], scope='function')
def ioc(prefix, request):
'A fixture that runs more than one IOC: caproto, epics'
# Get a new prefix for each IOC type:
if request.param == 'caproto':
ioc_ = _caproto_ioc(prefix, request)
elif request.param == 'epics-base':
ioc_ = _epics_base_ioc(prefix, request)
return ioc_
def start_repeater():
global _repeater_process
if _repeater_process is not None:
return
logger.info('Spawning repeater process')
_repeater_process = run_example_ioc('--script',
args=['caproto-repeater'],
request=None,
pv_to_check=None)
time.sleep(1.0)
def stop_repeater():
global _repeater_process
if _repeater_process is None:
return
logger.info('[Repeater] Sending Ctrl-C to the repeater')
if sys.platform == 'win32':
_repeater_process.terminate()
else:
_repeater_process.send_signal(signal.SIGINT)
_repeater_process.wait()
_repeater_process = None
logger.info('[Repeater] Repeater exited')
def default_setup_module(module):
logger.info('-- default module setup {} --'.format(module.__name__))
start_repeater()
def default_teardown_module(module):
logger.info('-- default module teardown {} --'.format(module.__name__))
stop_repeater()
@pytest.fixture(scope='function')
def pvdb_from_server_example():
alarm = ca.ChannelAlarm(
status=ca.AlarmStatus.READ,
severity=ca.AlarmSeverity.MINOR_ALARM,
alarm_string='alarm string',
)
pvdb = {
'pi': ca.ChannelDouble(value=3.14,
lower_disp_limit=3.13,
upper_disp_limit=3.15,
lower_alarm_limit=3.12,
upper_alarm_limit=3.16,
lower_warning_limit=3.11,
upper_warning_limit=3.17,
lower_ctrl_limit=3.10,
upper_ctrl_limit=3.18,
precision=5,
units='doodles',
alarm=alarm,
),
'enum': ca.ChannelEnum(value='b',
enum_strings=['a', 'b', 'c', 'd'],
),
'enum2': ca.ChannelEnum(value='bb',
enum_strings=['aa', 'bb', 'cc', 'dd'],
),
'int': ca.ChannelInteger(value=96,
units='doodles',
),
'char': ca.ChannelByte(value=b'3',
units='poodles',
lower_disp_limit=33,
upper_disp_limit=35,
lower_alarm_limit=32,
upper_alarm_limit=36,
lower_warning_limit=31,
upper_warning_limit=37,
lower_ctrl_limit=30,
upper_ctrl_limit=38,
),
'bytearray': ca.ChannelByte(value=b'1234567890' * 2),
'chararray': ca.ChannelChar(value=b'1234567890' * 2),
'str': ca.ChannelString(value='hello',
string_encoding='latin-1',
alarm=alarm),
'str2': ca.ChannelString(value='hello',
string_encoding='latin-1',
alarm=alarm),
'stra': ca.ChannelString(value=['hello', 'how is it', 'going'],
string_encoding='latin-1'),
}
return pvdb
@pytest.fixture(scope='function')
def curio_server(prefix):
str_alarm_status = ca.ChannelAlarm(
status=ca.AlarmStatus.READ,
severity=ca.AlarmSeverity.MINOR_ALARM,
alarm_string='alarm string',
)
caget_pvdb = {
'pi': ca.ChannelDouble(value=3.14,
lower_disp_limit=3.13,
upper_disp_limit=3.15,
lower_alarm_limit=3.12,
upper_alarm_limit=3.16,
lower_warning_limit=3.11,
upper_warning_limit=3.17,
lower_ctrl_limit=3.10,
upper_ctrl_limit=3.18,
precision=5,
units='doodles',
),
'enum': ca.ChannelEnum(value='b',
enum_strings=['a', 'b', 'c', 'd'],
),
'int': ca.ChannelInteger(value=33,
units='poodles',
lower_disp_limit=33,
upper_disp_limit=35,
lower_alarm_limit=32,
upper_alarm_limit=36,
lower_warning_limit=31,
upper_warning_limit=37,
lower_ctrl_limit=30,
upper_ctrl_limit=38,
),
'char': ca.ChannelByte(value=b'3',
units='poodles',
lower_disp_limit=33,
upper_disp_limit=35,
lower_alarm_limit=32,
upper_alarm_limit=36,
lower_warning_limit=31,
upper_warning_limit=37,
lower_ctrl_limit=30,
upper_ctrl_limit=38,
),
'str': ca.ChannelString(value='hello',
alarm=str_alarm_status,
reported_record_type='caproto'),
}
# tack on a unique prefix
caget_pvdb = {prefix + key: value
for key, value in caget_pvdb.items()}
# Hide these imports so that the other fixtures are usable by other
# libraries (e.g. ophyd) without the experimental dependencies.
import curio
import caproto.curio
async def _server(pvdb):
ctx = caproto.curio.server.Context(pvdb)
try:
await ctx.run()
except caproto.curio.server.ServerExit:
logger.info('ServerExit caught; exiting')
except Exception as ex:
logger.error('Server failed: %s %s', type(ex), ex)
raise
async def run_server(client, *, pvdb=caget_pvdb):
server_task = await curio.spawn(_server, pvdb, daemon=True)
try:
await client()
except caproto.curio.server.ServerExit:
...
finally:
await server_task.cancel()
return run_server, prefix, caget_pvdb
async def get_curio_context():
logger.debug('New curio broadcaster')
# Hide this import so that the other fixtures are usable by other
# libraries (e.g. ophyd) without the experimental dependencies.
import caproto.curio
broadcaster = caproto.curio.client.SharedBroadcaster()
logger.debug('Registering...')
await broadcaster.register()
logger.debug('Registered! Returning new context.')
return caproto.curio.client.Context(broadcaster)
def run_with_trio_context(func, **kwargs):
# Hide these imports so that the other fixtures are usable by other
# libraries (e.g. ophyd) without the experimental dependencies.
import caproto.trio
import trio
async def runner():
async with trio.open_nursery() as nursery:
logger.debug('New trio broadcaster')
broadcaster = caproto.trio.client.SharedBroadcaster(
nursery=nursery)
logger.debug('Registering...')
await broadcaster.register()
logger.debug('Registered! Returning new context.')
context = caproto.trio.client.Context(broadcaster, nursery=nursery)
ret = await func(context=context, **kwargs)
logger.debug('Shutting down the broadcaster')
await broadcaster.disconnect()
logger.debug('And the context')
# await context.stop()
nursery.cancel_scope.cancel()
return ret
return trio.run(runner)
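# Illustrative sketch (hypothetical coroutine, not used by the fixtures above):
# run_with_trio_context() awaits the given coroutine with a ready-to-use
# ``context=`` keyword argument and tears the broadcaster down afterwards.
def _example_run_with_trio_context():
    async def client_main(context, note='hello'):
        # A real client would create channels/PVs through ``context`` here.
        return note

    return run_with_trio_context(client_main, note='hello from trio')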
@pytest.fixture(scope='function',
params=['curio', 'trio', 'asyncio'])
def server(request):
def curio_runner(pvdb, client, *, threaded_client=False):
# Hide these imports so that the other fixtures are usable by other
# libraries (e.g. ophyd) without the experimental dependencies.
import curio
import caproto.curio
async def server_main():
try:
ctx = caproto.curio.server.Context(pvdb)
await ctx.run()
except caproto.curio.server.ServerExit:
logger.info('Server exited normally')
except Exception as ex:
logger.error('Server failed: %s %s', type(ex), ex)
raise
async def run_server_and_client():
try:
server_task = await curio.spawn(server_main)
# Give this a couple tries, akin to poll_readiness.
for _ in range(15):
try:
if threaded_client:
await threaded_in_curio_wrapper(client)()
else:
await client()
except TimeoutError:
continue
else:
break
else:
raise TimeoutError(f"ioc failed to start")
finally:
await server_task.cancel()
with curio.Kernel() as kernel:
kernel.run(run_server_and_client)
def trio_runner(pvdb, client, *, threaded_client=False):
# Hide these imports so that the other fixtures are usable by other
# libraries (e.g. ophyd) without the experimental dependencies.
import trio
import caproto.trio
async def trio_server_main(task_status):
try:
ctx = caproto.trio.server.Context(pvdb)
task_status.started(ctx)
await ctx.run()
except Exception as ex:
logger.error('Server failed: %s %s', type(ex), ex)
raise
async def run_server_and_client():
async with trio.open_nursery() as test_nursery:
server_context = await test_nursery.start(trio_server_main)
# Give this a couple tries, akin to poll_readiness.
for _ in range(15):
try:
if threaded_client:
await trio.run_sync_in_worker_thread(client)
else:
await client(test_nursery, server_context)
except TimeoutError:
continue
else:
break
server_context.stop()
# don't leave the server running:
test_nursery.cancel_scope.cancel()
trio.run(run_server_and_client)
def asyncio_runner(pvdb, client, *, threaded_client=False):
        import asyncio
        import caproto.asyncio.server
async def asyncio_server_main():
try:
ctx = caproto.asyncio.server.Context(pvdb)
await ctx.run()
except Exception as ex:
logger.error('Server failed: %s %s', type(ex), ex)
raise
async def run_server_and_client(loop):
tsk = loop.create_task(asyncio_server_main())
# Give this a couple tries, akin to poll_readiness.
for _ in range(15):
try:
if threaded_client:
                        await loop.run_in_executor(None, client)
else:
await client()
except TimeoutError:
continue
else:
break
tsk.cancel()
await asyncio.wait((tsk, ))
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(run_server_and_client(loop))
if request.param == 'curio':
curio_runner.backend = 'curio'
return curio_runner
elif request.param == 'trio':
trio_runner.backend = 'trio'
return trio_runner
elif request.param == 'asyncio':
asyncio_runner.backend = 'asyncio'
return asyncio_runner
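# Illustrative sketch (underscore-prefixed so pytest does not collect it): a
# test receives one of the runners above through the ``server`` fixture and
# calls it with a pvdb plus an async client. The pvdb and client here are
# placeholders; note that the runners pass the client different arguments
# depending on the backend.
def _example_server_usage(server, prefix):
    pvdb = {prefix + 'value': ca.ChannelInteger(value=0)}

    async def client(*args):
        # A real test would open a client context here and read/write the PV.
        ...

    server(pvdb, client)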
def pytest_make_parametrize_id(config, val, argname):
# FIX for python 3.6.3 and/or pytest 3.3.0
if isinstance(val, bytes):
return repr(val)
@pytest.fixture(scope='function')
def circuit_pair(request):
host = '127.0.0.1'
port = 5555
priority = 1
version = 13
cli_circuit = ca.VirtualCircuit(ca.CLIENT, (host, port), priority)
buffers_to_send = cli_circuit.send(ca.VersionRequest(version=version,
priority=priority))
srv_circuit = ca.VirtualCircuit(ca.SERVER, (host, port), None)
commands, _ = srv_circuit.recv(*buffers_to_send)
for command in commands:
srv_circuit.process_command(command)
buffers_to_send = srv_circuit.send(ca.VersionResponse(version=version))
commands, _ = cli_circuit.recv(*buffers_to_send)
for command in commands:
cli_circuit.process_command(command)
return cli_circuit, srv_circuit
# Import the pytest-benchmark -> asv shim if both are available
try:
__import__('pytest_benchmark')
__import__('asv')
except ImportError as ex:
print('{} is missing'.format(ex))
else:
from ._asv_shim import get_conftest_globals
globals().update(**get_conftest_globals())
def threaded_in_curio_wrapper(fcn):
'''Run a threaded test with curio support
Usage
-----
    Wrap the threaded function using this wrapper and ``await`` the wrapped
    function inside the test kernel: it runs the function in a worker thread
    via `curio.run_in_thread` and re-raises any exception raised in the thread.
'''
# Hide this import so that the other fixtures are usable by other
# libraries (e.g. ophyd) without the experimental dependencies.
import curio
uqueue = curio.UniversalQueue()
def wrapped_threaded_func():
try:
fcn()
except Exception as ex:
uqueue.put(ex)
else:
uqueue.put(None)
@functools.wraps(fcn)
async def test_runner():
'Wait for the test function completion'
await curio.run_in_thread(wrapped_threaded_func)
res = await uqueue.get()
if res is not None:
raise res
return test_runner
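# Illustrative sketch (hypothetical blocking client function): awaiting the
# wrapped function inside a curio kernel runs it in a worker thread and
# re-raises any exception the thread produced.
def _example_threaded_in_curio(blocking_client):
    import curio

    async def main():
        await threaded_in_curio_wrapper(blocking_client)()

    curio.run(main)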
@pytest.fixture(scope='function', params=['array', 'numpy'])
def backends(request):
from caproto import select_backend, backend
def switch_back():
select_backend(initial_backend)
initial_backend = backend.backend_name
request.addfinalizer(switch_back)
try:
select_backend(request.param)
except KeyError:
raise pytest.skip(f'backend {request.param} unavailable')
def dump_process_output(prefix, stdout, stderr):
print('-- Process stdout --')
if stdout is not None:
for line in stdout.decode('latin-1').split('\n'):
print(f'[{prefix}-stdout]', line)
print('-- Process stderr --')
if stderr is not None:
for line in stderr.decode('latin-1').split('\n'):
print(f'[{prefix}-stderr]', line)
print('--')
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
'Socket and thread debugging hook'
from .debug import use_debug_socket, use_thread_counter
with use_thread_counter() as (dangling_threads, thread_counter):
with use_debug_socket() as (sockets, socket_counter):
yield
num_dangling = len(dangling_threads)
num_threads = thread_counter.value
if num_threads:
if num_dangling:
thread_info = ', '.join(str(thread) for thread in dangling_threads)
logger.warning('%d thread(s) left dangling out of %d! %s',
num_dangling, num_threads, thread_info)
# pytest.fail() ?
else:
logger.debug('%d thread(s) OK', num_threads)
item.user_properties.append(('total_threads', num_threads))
item.user_properties.append(('dangling_threads', num_dangling))
num_sockets = socket_counter.value
num_open = len(sockets)
if num_sockets:
if num_open:
logger.warning('%d sockets still open of %d', num_open,
num_sockets)
# pytest.fail() ?
else:
logger.debug('%d sockets OK', socket_counter.value)
item.user_properties.append(('total_sockets', num_sockets))
item.user_properties.append(('open_sockets', num_open))
|
server-socket.py
|
import socket
import threading
host = socket.gethostbyname(socket.gethostname())
port = 12458
# Starting the Server
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.bind((host, port))
serverSocket.listen()
# List for clients and their nicknames
clients = []
nicknames = []
# We want to broadcast the message to all the clients connected on the Server
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
# As long as a message is received, we broadcast the message
message = client.recv(1024)
broadcast(message)
except:
# Cutting the connection to this client and removing it from the list
index = clients.index(client)
            clients.remove(client)
client.close()
# Also remove the client's nickname and broadcast that the client left
nickname = nicknames[index]
broadcast(f'{nickname} has left the chat.'.encode('ascii'))
nicknames.remove(nickname)
break
def receive():
while True:
# Accepting the client connection
client, address = serverSocket.accept()
print(f"Connected with {str(address)}")
# Requesting for a nickname from the client
client.send('NICK'.encode('ascii'))
nickname = client.recv(1024).decode('ascii')
nicknames.append(nickname)
clients.append(client)
# Broadcasting the message to all the clients in the server
print(f'Nickname of the client is {nickname}')
broadcast(f'{nickname} joined the chat.'.encode('ascii'))
client.send('Connected to the server'.encode('ascii'))
# Creating thread for handling the client
# target is a callable object to be invoked
thread = threading.Thread(target = handle, args = (client,))
thread.start() # Used to start a thread's activity
# Printing the number of active connections apart from the main thread
print(f"Active Connections: {threading.activeCount() - 1}")
print("The Server is listening.")
receive()
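# Illustrative client-side sketch (for reference only; a real client belongs in
# its own script, since receive() above blocks forever): it answers the
# server's 'NICK' request and then prints every broadcast message it receives.
def example_client(nickname, server_host, server_port=12458):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((server_host, server_port))
    while True:
        message = client.recv(1024).decode('ascii')
        if message == 'NICK':
            # The server asks for a nickname right after accepting the connection
            client.send(nickname.encode('ascii'))
        else:
            print(message)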
|
utils.py
|
import codecs
import json
import os
import shutil
import stat
from threading import Thread
import requests
import subprocess
import sys
import time
import unittest
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from flask import request
def file_content(path, binary=True):
if binary:
with open(path, 'rb') as f:
return f.read()
else:
with codecs.open(path, 'rb', 'utf-8') as f:
return f.read()
def get_after_script(after_log):
return 'python "{}" "{}"'.format(
os.path.join(os.path.split(os.path.abspath(__file__))[0], 'after_script.py'), after_log)
def get_after_log(after_log):
with codecs.open(after_log, 'rb', 'utf-8') as f:
return json.loads(f.read())
def start_executor(args, output_file=None, status_file=None, port=None, callback=None, token=None, env=None,
work_dir=None, run_after=None, no_exit=False, watch_generated=False,
buffer_size=4 * 1024 * 1024, subprocess_kwargs=None):
S = lambda s: s.decode('utf-8') if isinstance(s, bytes) else s
executor_args = [
'./ml-gridengine-executor',
'--server-host=127.0.0.1',
'--buffer-size={}'.format(buffer_size),
]
if output_file:
executor_args.append('--output-file={}'.format(output_file))
if status_file:
executor_args.append('--status-file={}'.format(status_file))
if port:
executor_args.append('--port={}'.format(port))
if callback:
executor_args.append('--callback-api={}'.format(callback))
if token:
executor_args.append('--callback-token={}'.format(token))
if env:
for k, v in env.items():
executor_args.append('--env={}={}'.format(S(k), S(v)))
if work_dir:
executor_args.append('--work-dir={}'.format(work_dir))
if run_after:
executor_args.append('--run-after={}'.format(run_after))
if no_exit:
executor_args.append('--no-exit')
if watch_generated:
executor_args.append('--watch-generated')
executor_args.append('--')
executor_args.extend(args)
print('Start executor: {}'.format(executor_args))
return subprocess.Popen(executor_args, **(subprocess_kwargs or {}))
@contextmanager
def run_executor_context(args, **kwargs):
with TemporaryDirectory() as tmpdir:
kwargs.setdefault('status_file', os.path.join(tmpdir, 'status.json'))
kwargs.setdefault('output_file', os.path.join(tmpdir, 'output.log'))
kwargs.setdefault('work_dir', os.path.join(tmpdir, 'work_dir'))
status_file = kwargs['status_file']
proc = start_executor(args, **kwargs)
try:
while proc.poll() is None and not os.path.exists(status_file):
time.sleep(.1)
status = json.loads(file_content(status_file, binary=False))
yield proc, {'uri': 'http://127.0.0.1:{}'.format(status['executor.port']),
'status_file': status_file,
'output_file': kwargs['output_file'],
'work_dir': kwargs['work_dir']}
finally:
proc.kill()
proc.wait()
def run_executor(args, **kwargs):
kwargs.setdefault('subprocess_kwargs', {})
kwargs['subprocess_kwargs'].setdefault('stdout', subprocess.PIPE)
kwargs['subprocess_kwargs'].setdefault('stderr', subprocess.STDOUT)
with run_executor_context(args, **kwargs) as (proc, ctx):
executor_output = None
try:
executor_output = proc.stdout.read()
with open(ctx['output_file'], 'rb') as f:
program_output = f.read()
return program_output, executor_output
except Exception:
if executor_output:
sys.stderr.buffer.write(executor_output)
raise
finally:
proc.kill()
proc.wait()
def get_count_exe():
return os.path.abspath('Count')
def get_count_output(N):
if N not in _cached_output_output:
_cached_output_output[N] = subprocess.check_output([get_count_exe(), str(N)])
return _cached_output_output[N]
_cached_output_output = {}
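# Illustrative sketch (assumes the executor binary and the Count program have
# been built in the working directory): the executor should capture exactly the
# output that the program produces when run directly.
def _example_run_count(N=10):
    program_output, executor_output = run_executor([get_count_exe(), str(N)])
    assert program_output == get_count_output(N)
    return executor_output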
def compute_fs_size(path):
ret = 0
st = os.stat(path, follow_symlinks=False)
ret += st.st_size
if stat.S_ISDIR(st.st_mode):
for name in os.listdir(path):
ret += compute_fs_size(os.path.join(path, name))
return ret
class TestCase(unittest.TestCase):
"""Base class for all test cases."""
class AppServer(object):
def __init__(self, app, port=12345):
self._app = app
self._app.route('/_shutdown', methods=['POST'])(self._shutdown)
self._port = port
self._uri = 'http://127.0.0.1:{}'.format(port)
@property
def uri(self):
return self._uri
@property
def application(self):
return self._app
def _shutdown(self):
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return ''
def shutdown(self):
requests.post(self.uri + '/_shutdown', json={})
def run(self):
return self._app.run(debug=False, host='127.0.0.1', port=self._port)
@contextmanager
def run_context(self):
th = Thread(target=self.run)
try:
th.start()
while True:
try:
r = requests.get(self.uri)
break
except requests.ConnectionError:
time.sleep(.1)
yield self.uri
finally:
self.shutdown()
th.join()
|
ominibot_car_driver.py
|
#!/usr/bin/env python3
# coding=UTF-8
#Copyright (c) 2021 Wei-Chih Lin(weichih.lin@protonmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import Libraries
import rclpy, threading, math, time, re
from rclpy.node import Node
from .ominibot_car_com import OminibotCar
# import msgs
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32MultiArray
# tf library
import tf_transformations
# tf2 library
from tf2_ros import TransformBroadcaster
class OminibotCarDriverNode(Node):
"""Get velocity and plan mobile path
"""
def __init__(self):
# Node name
super().__init__("ominibot_car_driver")
self.node_name = self.get_name()
# declare parameter
## ominibot car
self.parameter = {
"port": "/dev/ominibot_car",
"baud": 115200,
"motor_encoder_count": 1170.0,
"motor_output_rpm": 185.0,
"motor_ratio": 90.0,
"motor_axis_length": 125.0,
"motor_axis_width": 150.0,
"wheel_diameter": 75.0,
"wheel_axis_length": 15.0,
"wheel_width": 30.0,
"odom_frequency": 20,
"timeout": None,
}
self.get_ros2_parameter()
        # initialize driver
self.driver = OminibotCar(self.parameter["port"], self.parameter["baud"], self.parameter["timeout"])
self.initialize_driver()
# Create subscriber
self.twist_subscriber = self.create_subscription(
Twist, '~/cmd_vel', self.callback_cmd_vel, 10)
# Create publisher
#self.odom_publisher = self.create_publisher(Float32MultiArray, "~/odom", 10)
# log
self.get_logger().info(f'Start!')
def get_ros2_parameter(self):
"""from ros2 parameter server to get parameter, and produce mecanum factor
"""
for key in self.parameter.keys():
self.declare_parameter(key, self.parameter[key])
self.parameter[key] = self.get_parameter(key).value
self.get_logger().info(f"Publish ros2 parameter, {key}: {self.parameter[key]}")
## use ros2 parameter to produce mecanum factor
self.mecanum_factor()
def mecanum_factor(self):
"""factor for mecanum Drive kinematic
Transform unit from meter to millimeter and add below parameter in self.parameter:
wheel_radius: radius of wheel -> float
wheel_perimeter: perimeter of wheel -> float
wheel_k: abs(x_distance) + abs(y_distance)-> float
x_distance: distance along to the x-axis from center of bot to motor axis -> float
y_distance: distance along to the y-axis from center of bot to motor axis -> float
left_front: coordinate(m) of the left-front wheel from center of bot -> tuple(float, float)
left_back: coordinate(m) of the left-front wheel from center of bot -> tuple(float, float)
right_front: coordinate(m) of the left-front wheel from center of bot -> tuple(float, float)
right_back: coordinate(m) of the left-front wheel from center of bot -> tuple(float, float)
Axis:
x\n
^\n
|\n
y <--
"""
for key in self.parameter.keys():
if key[:10] == "motor_axis" or key[:5] == "wheel":
self.parameter[key] *= math.pow(10, -3)
self.parameter["wheel_radius"] = self.parameter["wheel_diameter"] / 2.0
self.parameter["wheel_perimeter"] = self.parameter["wheel_diameter"] * math.pi
# distance between center of bot and center of wheel
self.parameter["x_distance"] = self.parameter["motor_axis_length"] / 2.0
self.parameter["y_distance"] = (self.parameter["motor_axis_width"] / 2.0) + self.parameter["wheel_axis_length"] + (self.parameter["wheel_width"] / 2.0)
self.parameter["left_front"] = (self.parameter["x_distance"], self.parameter["y_distance"])
self.parameter["left_back"] = (-self.parameter["x_distance"], self.parameter["y_distance"])
self.parameter["right_front"] = (self.parameter["x_distance"], -self.parameter["y_distance"])
self.parameter["right_back"] = (-self.parameter["x_distance"], -self.parameter["y_distance"])
self.parameter["wheel_k"] = self.parameter["x_distance"] + self.parameter["y_distance"]
def wheel_speed(self, Vx, Vy, Vz, platform="mecanum"):
"""Calculate speed for each wheel\n
Args:
Vx: linear speed for x-axis -> float
Vy: linear speed for y-axis -> float
Vz: angular speed for z-axis -> float
platform: which kinematic to use, default is \"mecanum\" -> string
Return:
(Vlf, Vlb, Vrf, Vrb):
Vlf: speed for left-front wheel -> float
Vlb: speed for left-back wheel -> float
Vrf: speed for right-front wheel -> float
Vrb: speed for right-back wheel -> float
"""
if platform == "mecanum":
Vz = self.parameter["wheel_k"] * Vz
# Translate linear velocity for each wheel
Vlf = Vx - Vy - Vz
Vlb = Vx + Vy - Vz
Vrb = Vx - Vy + Vz
Vrf = Vx + Vy + Vz
            # Convert linear velocity to wheel speed in rev/s (see the standalone kinematics sketch below this class)
Vlf /= self.parameter["wheel_perimeter"]
Vlb /= self.parameter["wheel_perimeter"]
Vrb /= self.parameter["wheel_perimeter"]
Vrf /= self.parameter["wheel_perimeter"]
# Translate velocity for each motor(rpm)
Vlf *= self.parameter["motor_ratio"]
Vlb *= self.parameter["motor_ratio"]
Vrb *= self.parameter["motor_ratio"]
Vrf *= self.parameter["motor_ratio"]
return (Vlf, Vlb, Vrf, Vrb)
def initialize_driver(self, platform="individual_wheel"):
"""Start communciate with ominibot car
Args:
platform: choose which platform you want to use:
1. omnibot
2. mecanum
3. individual_wheel
default is \"individual_wheel\" -> string
"""
self.driver.set_system_mode(platform=platform)
try:
thread = threading.Thread(target=self.driver.serial_thread)
thread.start()
        except Exception:
            self.get_logger().error("Failed to start the serial driver thread")
self.shutdown()
def callback_cmd_vel(self, msg):
"""Receive msg Twist and send velocity to ominibot_car_com
"""
self.linear_x = msg.linear.x
self.linear_y = msg.linear.y
self.angular_z = msg.angular.z
self.get_logger().info(f"I get velocity - linear x: {self.linear_x}, linear y: {self.linear_y}, angular z: {self.angular_z}")
wheel_speed = self.wheel_speed(self.linear_x, self.linear_y, self.angular_z)
self.driver.individual_wheel(V1=wheel_speed[3], V2=wheel_speed[0], V3=wheel_speed[2], V4=wheel_speed[1])
def encoder_callback(self):
"""
"""
pass
def shutdown(self):
"""close ominibot car port and shutdown this node
"""
self.get_logger().info("Stop threading...")
self.driver.stop_thread()
self.get_logger().info("close ominibot car port...")
self.driver.disconnect()
self.get_logger().info("Done! Will shutdown this node.")
rclpy.shutdown()
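# Illustrative, standalone sketch of the same mecanum inverse kinematics used in
# OminibotCarDriverNode.wheel_speed(), with made-up geometry values so the math
# can be checked without ROS 2 installed.
def _mecanum_wheel_speed_demo(Vx=0.2, Vy=0.0, Vz=0.5):
    wheel_perimeter = 0.075 * math.pi   # assumed 75 mm wheel diameter, in meters
    wheel_k = 0.0625 + 0.12             # assumed |x_distance| + |y_distance| in meters
    motor_ratio = 90.0                  # assumed gear ratio
    Vz = wheel_k * Vz
    # wheel speed in rev/s, then scaled by the gear ratio (mirrors wheel_speed)
    Vlf = (Vx - Vy - Vz) / wheel_perimeter * motor_ratio
    Vlb = (Vx + Vy - Vz) / wheel_perimeter * motor_ratio
    Vrb = (Vx - Vy + Vz) / wheel_perimeter * motor_ratio
    Vrf = (Vx + Vy + Vz) / wheel_perimeter * motor_ratio
    return (Vlf, Vlb, Vrf, Vrb)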
def main(args=None):
rclpy.init(args=args)
node = OminibotCarDriverNode()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
node.destroy_node()
node.shutdown()
if __name__ == '__main__':
main()
|
test_712_buffering.py
|
import datetime
import re
import sys
import time
import subprocess
from datetime import timedelta
from threading import Thread
import pytest
from h2_conf import HttpdConf
class CurlPiper:
def __init__(self, url: str):
self.url = url
self.proc = None
self.args = None
self.headerfile = None
self._stderr = []
self._stdout = []
self.stdout_thread = None
self.stderr_thread = None
def start(self, env):
self.args, self.headerfile = env.curl_complete_args(self.url, timeout=5, options=[
"-T", "-", "-X", "POST", "--trace-ascii", "%", "--trace-time"])
sys.stderr.write("starting: {0}\n".format(self.args))
self.proc = subprocess.Popen(self.args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0)
def read_output(fh, buffer):
while True:
chunk = fh.read()
if not chunk:
break
buffer.append(chunk.decode())
# collect all stdout and stderr until we are done
# use separate threads to not block ourself
self._stderr = []
self._stdout = []
if self.proc.stderr:
self.stderr_thread = Thread(target=read_output, args=(self.proc.stderr, self._stderr))
self.stderr_thread.start()
if self.proc.stdout:
self.stdout_thread = Thread(target=read_output, args=(self.proc.stdout, self._stdout))
self.stdout_thread.start()
return self.proc
def send(self, data: str):
self.proc.stdin.write(data.encode())
self.proc.stdin.flush()
def close(self) -> ([str], [str]):
self.proc.stdin.close()
self.stdout_thread.join()
self.stderr_thread.join()
self._end()
return self._stdout, self._stderr
def _end(self):
if self.proc:
# noinspection PyBroadException
try:
if self.proc.stdin:
# noinspection PyBroadException
try:
self.proc.stdin.close()
except Exception:
pass
if self.proc.stdout:
self.proc.stdout.close()
if self.proc.stderr:
self.proc.stderr.close()
except Exception:
self.proc.terminate()
finally:
self.stdout_thread = None
self.stderr_thread = None
self.proc = None
def stutter_check(self, env, chunks: [str], stutter: datetime.timedelta):
if not self.proc:
self.start(env)
for chunk in chunks:
self.send(chunk)
time.sleep(stutter.total_seconds())
recv_out, recv_err = self.close()
# assert we got everything back
assert "".join(chunks) == "".join(recv_out)
# now the tricky part: check *when* we got everything back
recv_times = []
for line in "".join(recv_err).split('\n'):
m = re.match(r'^\s*(\d+:\d+:\d+(\.\d+)?) <= Recv data, (\d+) bytes.*', line)
if m:
recv_times.append(datetime.time.fromisoformat(m.group(1)))
# received as many chunks as we sent
assert len(chunks) == len(recv_times), "received response not in {0} chunks, but {1}".format(
len(chunks), len(recv_times))
def microsecs(tdelta):
return ((tdelta.hour * 60 + tdelta.minute) * 60 + tdelta.second) * 1000000 + tdelta.microsecond
recv_deltas = []
last_mics = microsecs(recv_times[0])
for ts in recv_times[1:]:
mics = microsecs(ts)
delta_mics = mics - last_mics
if delta_mics < 0:
                delta_mics += 24 * 60 * 60 * 1000000  # wrap around midnight
recv_deltas.append(datetime.timedelta(microseconds=delta_mics))
last_mics = mics
stutter_td = datetime.timedelta(seconds=stutter.total_seconds() * 0.9) # 10% leeway
# TODO: the first two chunks are often close together, it seems
# there still is a little buffering delay going on
for idx, td in enumerate(recv_deltas[1:]):
assert stutter_td < td, \
f"chunk {idx} arrived too early \n{recv_deltas}\nafter {td}\n{recv_err}"
class TestStore:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
env.setup_data_1k_1m()
conf = HttpdConf(env).add("H2OutputBuffering off")
conf.add_vhost_cgi(h2proxy_self=True).install()
assert env.apache_restart() == 0
@pytest.mark.skip(reason="this test shows unreliable jitter")
def test_712_01(self, env):
# test gRPC like requests that do not end, but give answers, see #207
#
# this test works like this:
# - use curl to POST data to the server /h2test/echo
# - feed curl the data in chunks, wait a bit between chunks
# - since some buffering on curl's stdout to Python is involved,
# we will see the response data only at the end.
# - therefore, we enable tracing with timestamps in curl on stderr
# and see when the response chunks arrive
# - if the server sends the incoming data chunks back right away,
# as it should, we see receiving timestamps separated roughly by the
# wait time between sends.
#
url = env.mkurl("https", "cgi", "/h2test/echo")
base_chunk = "0123456789"
chunks = ["chunk-{0:03d}-{1}\n".format(i, base_chunk) for i in range(5)]
stutter = timedelta(seconds=0.2) # this is short, but works on my machine (tm)
piper = CurlPiper(url=url)
piper.stutter_check(env, chunks, stutter)
def test_712_02(self, env):
# same as 712_01 but via mod_proxy_http2
#
url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo")
base_chunk = "0123456789"
chunks = ["chunk-{0:03d}-{1}\n".format(i, base_chunk) for i in range(3)]
stutter = timedelta(seconds=0.4) # need a bit more delay since we have the extra connection
piper = CurlPiper(url=url)
piper.stutter_check(env, chunks, stutter)
def test_712_03(self, env):
# same as 712_02 but with smaller chunks
#
url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo")
base_chunk = "0"
chunks = ["ck{0}-{1}\n".format(i, base_chunk) for i in range(3)]
stutter = timedelta(seconds=0.4) # need a bit more delay since we have the extra connection
piper = CurlPiper(url=url)
piper.stutter_check(env, chunks, stutter)
|
estimator.py
|
"""
"""
import pickle
import copy
from functools import partial
from multiprocessing import Process, Pipe
import time
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, r2_score
from sklearn.decomposition import PCA
try:
from sklearn.model_selection import KFold, StratifiedKFold, LeaveOneOut, \
ShuffleSplit, StratifiedShuffleSplit, \
PredefinedSplit
except ImportError:
# sklearn.cross_validation is deprecated in version 0.18 of sklearn
from sklearn.cross_validation import KFold, StratifiedKFold, LeaveOneOut, \
ShuffleSplit, StratifiedShuffleSplit, \
PredefinedSplit
# For backwards compatibility with older versions of hyperopt.fmin
import inspect
import numpy as np
import warnings
import hyperopt
import scipy.sparse
from . import components
# Constants for partial_fit
# The partial_fit method will not be run if there are fewer than
# timeout * timeout_buffer seconds left before the timeout
timeout_buffer = 0.05
# The minimum number of iterations of the partial_fit method that must be run
# before early stopping can kick in is min_n_iters
min_n_iters = 7
# After best_loss_cutoff_n_iters iterations have occurred, the training can be
# stopped early if the validation scores are far from the best scores
best_loss_cutoff_n_iters = 35
# Early stopping can occur when the best validation score of the earlier runs is
# greater than that of the later runs; tipping_pt_ratio determines the split
tipping_pt_ratio = 0.6
# Retraining will be done with all training data for retrain_fraction
# multiplied by the number of iterations used to train the original learner
retrain_fraction = 1.2
class NonFiniteFeature(Exception):
"""
"""
def transform_combine_XEX(Xfit, info, en_pps=[], Xval=None,
EXfit_list=None, ex_pps_list=[], EXval_list=None):
'''Transform endogenous and exogenous datasets and combine them into a
single dataset for training and testing.
'''
def run_preprocs(preprocessings, Xfit, Xval=None):
'''Run all preprocessing steps in a pipeline
'''
for pp_algo in preprocessings:
info('Fitting', pp_algo, 'to X of shape', Xfit.shape)
if isinstance(pp_algo, PCA):
n_components = pp_algo.get_params()['n_components']
n_components = min([n_components] + list(Xfit.shape))
pp_algo.set_params(n_components=n_components)
info('Limited PCA n_components at', n_components)
pp_algo.fit(Xfit)
info('Transforming Xfit', Xfit.shape)
Xfit = pp_algo.transform(Xfit)
# np.isfinite() does not work on sparse matrices
if not (scipy.sparse.issparse(Xfit) or \
np.all(np.isfinite(Xfit))):
# -- jump to NonFiniteFeature handler below
raise NonFiniteFeature(pp_algo)
if Xval is not None:
info('Transforming Xval', Xval.shape)
Xval = pp_algo.transform(Xval)
if not (scipy.sparse.issparse(Xval) or \
np.all(np.isfinite(Xval))):
# -- jump to NonFiniteFeature handler below
raise NonFiniteFeature(pp_algo)
return (Xfit, Xval)
# import ipdb; ipdb.set_trace()
transformed_XEX_list = []
en_pps = list(en_pps)
ex_pps_list = list(ex_pps_list)
if ex_pps_list == [] and EXfit_list is not None:
ex_pps_list = [[]] * len(EXfit_list)
xex_pps_list = [en_pps] + ex_pps_list
if EXfit_list is None:
EXfit_list = []
assert EXval_list is None
EXval_list = []
elif EXval_list is None:
EXval_list = [None] * len(EXfit_list)
EXfit_list = list(EXfit_list)
EXval_list = list(EXval_list)
XEXfit_list = [Xfit] + EXfit_list
XEXval_list = [Xval] + EXval_list
for pps, dfit, dval in zip(xex_pps_list, XEXfit_list, XEXval_list):
if pps != []:
dfit, dval = run_preprocs(pps, dfit, dval)
if dval is not None:
transformed_XEX_list.append( (dfit, dval) )
else:
transformed_XEX_list.append(dfit)
def safe_concatenate(XS):
if not any(scipy.sparse.issparse(x) for x in XS):
return np.concatenate(XS, axis=1)
XS = [ x if scipy.sparse.issparse(x) else scipy.sparse.csr_matrix(x)
for x in XS ]
return scipy.sparse.hstack(XS)
if Xval is None:
XEXfit = safe_concatenate(transformed_XEX_list)
return XEXfit
else:
XEXfit_list, XEXval_list = zip(*transformed_XEX_list)
XEXfit = safe_concatenate(XEXfit_list)
XEXval = safe_concatenate(XEXval_list)
return (XEXfit, XEXval)
def pfit_until_convergence(learner, is_classif, XEXfit, yfit, info,
max_iters=None, best_loss=None,
XEXval=None, yval=None,
timeout=None, t_start=None):
'''Do partial fitting until the convergence criterion is met
'''
if max_iters is None:
assert XEXval is not None and yval is not None and\
best_loss is not None
if timeout is not None:
assert t_start is not None
def should_stop(scores):
# TODO: possibly extend min_n_iters based on how close the current
# score is to the best score, up to some larger threshold
if len(scores) < min_n_iters:
return False
tipping_pt = int(tipping_pt_ratio * len(scores))
early_scores = scores[:tipping_pt]
late_scores = scores[tipping_pt:]
if max(early_scores) >= max(late_scores):
info("stopping early due to no improvement in late scores")
return True
# TODO: make this less confusing and possibly more accurate
if len(scores) > best_loss_cutoff_n_iters and \
max(scores) < 1 - best_loss and \
3 * ( max(late_scores) - max(early_scores) ) < \
1 - best_loss - max(late_scores):
info("stopping early due to best_loss cutoff criterion")
return True
return False
n_iters = 0 # Keep track of the number of training iterations
best_learner = None
if timeout is not None:
timeout_tolerance = timeout * timeout_buffer
else:
timeout = float('Inf')
timeout_tolerance = 0.
t_start = float('Inf')
rng = np.random.RandomState(6665)
train_idxs = rng.permutation(XEXfit.shape[0])
validation_scores = []
def convergence_met():
if max_iters is not None and n_iters >= max_iters:
return True
if time.time() - t_start >= timeout - timeout_tolerance:
return True
if yval is not None:
return should_stop(validation_scores)
else:
return False
while not convergence_met():
n_iters += 1
rng.shuffle(train_idxs)
if is_classif:
learner.partial_fit(XEXfit[train_idxs], yfit[train_idxs],
classes=np.unique(yfit))
else:
learner.partial_fit(XEXfit[train_idxs], yfit[train_idxs])
if XEXval is not None:
validation_scores.append(learner.score(XEXval, yval))
if max(validation_scores) == validation_scores[-1]:
best_learner = copy.deepcopy(learner)
info('VSCORE', validation_scores[-1])
if XEXval is None:
return (learner, n_iters)
else:
return (best_learner, n_iters)
def _cost_fn(argd, X, y, EX_list, valid_size, n_folds, shuffle, random_state,
use_partial_fit, info, timeout, _conn, loss_fn=None,
continuous_loss_fn=False, best_loss=None):
'''Calculate the loss function
'''
try:
t_start = time.time()
# Extract info from calling function.
if 'classifier' in argd:
classifier = argd['classifier']
regressor = argd['regressor']
preprocessings = argd['preprocessing']
ex_pps_list = argd['ex_preprocs']
else:
classifier = argd['model']['classifier']
regressor = argd['model']['regressor']
preprocessings = argd['model']['preprocessing']
ex_pps_list = argd['model']['ex_preprocs']
learner = classifier if classifier is not None else regressor
is_classif = classifier is not None
untrained_learner = copy.deepcopy(learner)
# -- N.B. modify argd['preprocessing'] in-place
# Determine cross-validation iterator.
if n_folds is not None:
if n_folds == -1:
info('Will use leave-one-out CV')
try:
cv_iter = LeaveOneOut().split(X)
except TypeError:
# Older syntax before sklearn version 0.18
cv_iter = LeaveOneOut(len(y))
elif is_classif:
info('Will use stratified K-fold CV with K:', n_folds,
'and Shuffle:', shuffle)
try:
cv_iter = StratifiedKFold(n_splits=n_folds,
shuffle=shuffle,
random_state=random_state
).split(X, y)
except TypeError:
# Older syntax before sklearn version 0.18
cv_iter = StratifiedKFold(y, n_folds=n_folds,
shuffle=shuffle,
random_state=random_state)
else:
info('Will use K-fold CV with K:', n_folds,
'and Shuffle:', shuffle)
try:
cv_iter = KFold(n_splits=n_folds,
shuffle=shuffle,
random_state=random_state).split(X)
except TypeError:
# Older syntax before sklearn version 0.18
cv_iter = KFold(len(y), n_folds=n_folds,
shuffle=shuffle,
random_state=random_state)
else:
if not shuffle: # always choose the last samples.
info('Will use the last', valid_size,
'portion of samples for validation')
n_train = int(len(y) * (1 - valid_size))
                valid_fold = np.ones(len(y), dtype=int)  # np.int was removed from recent NumPy
valid_fold[:n_train] = -1 # "-1" indicates train fold.
try:
cv_iter = PredefinedSplit(valid_fold).split()
except TypeError:
# Older syntax before sklearn version 0.18
cv_iter = PredefinedSplit(valid_fold)
elif is_classif:
                info('Will use stratified shuffle-and-split with '
                     'validation portion:', valid_size)
try:
cv_iter = StratifiedShuffleSplit(1, test_size=valid_size,
random_state=random_state
).split(X, y)
except TypeError:
# Older syntax before sklearn version 0.18
cv_iter = StratifiedShuffleSplit(y, 1, test_size=valid_size,
random_state=random_state)
else:
info('Will use shuffle-and-split with validation portion:',
valid_size)
try:
cv_iter = ShuffleSplit(n_splits=1, test_size=valid_size,
random_state=random_state).split(X)
except TypeError:
# Older syntax before sklearn version 0.18
cv_iter = ShuffleSplit(len(y), 1, test_size=valid_size,
random_state=random_state)
# Use the above iterator for cross-validation prediction.
cv_y_pool = np.array([])
cv_pred_pool = np.array([])
cv_n_iters = np.array([])
for train_index, valid_index in cv_iter:
Xfit, Xval = X[train_index], X[valid_index]
yfit, yval = y[train_index], y[valid_index]
if EX_list is not None:
_EX_list = [ (EX[train_index], EX[valid_index])
for EX in EX_list ]
EXfit_list, EXval_list = zip(*_EX_list)
else:
EXfit_list = None
EXval_list = None
XEXfit, XEXval = transform_combine_XEX(
Xfit, info, preprocessings, Xval,
EXfit_list, ex_pps_list, EXval_list
)
learner = copy.deepcopy(untrained_learner)
info('Training learner', learner, 'on X/EX of dimension',
XEXfit.shape)
if hasattr(learner, "partial_fit") and use_partial_fit:
learner, n_iters = pfit_until_convergence(
learner, is_classif, XEXfit, yfit, info,
best_loss=best_loss, XEXval=XEXval, yval=yval,
timeout=timeout, t_start=t_start
)
else:
learner.fit(XEXfit, yfit)
n_iters = None
if learner is None:
break
cv_y_pool = np.append(cv_y_pool, yval)
info('Scoring on X/EX validation of shape', XEXval.shape)
if continuous_loss_fn:
cv_pred_pool = np.append(cv_pred_pool, learner.predict_proba(XEXval))
else:
cv_pred_pool = np.append(cv_pred_pool, learner.predict(XEXval))
cv_n_iters = np.append(cv_n_iters, n_iters)
else: # all CV folds are exhausted.
if loss_fn is None:
if is_classif:
loss = 1 - accuracy_score(cv_y_pool, cv_pred_pool)
# -- squared standard error of mean
lossvar = (loss * (1 - loss)) / max(1, len(cv_y_pool) - 1)
info('OK trial with accuracy %.1f +- %.1f' % (
100 * (1 - loss),
100 * np.sqrt(lossvar))
)
else:
loss = 1 - r2_score(cv_y_pool, cv_pred_pool)
lossvar = None # variance of R2 is undefined.
info('OK trial with R2 score %.2e' % (1 - loss))
else:
# Use a user specified loss function
loss = loss_fn(cv_y_pool, cv_pred_pool)
lossvar = None
info('OK trial with loss %.1f' % loss)
t_done = time.time()
rval = {
'loss': loss,
'loss_variance': lossvar,
'learner': untrained_learner,
'preprocs': preprocessings,
'ex_preprocs': ex_pps_list,
'status': hyperopt.STATUS_OK,
'duration': t_done - t_start,
'iterations': (cv_n_iters.max()
if (hasattr(learner, "partial_fit") and use_partial_fit)
else None),
}
rtype = 'return'
        # The for loop exited via break: one fold did not finish running.
if learner is None:
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
                'failure': ('Not enough time to finish training on '
                            'all CV folds'),
'duration': t_done - t_start,
}
rtype = 'return'
##==== Cost function exception handling ====##
except (NonFiniteFeature,) as exc:
print('Failing trial due to NaN in', str(exc))
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': str(exc),
'duration': t_done - t_start,
}
rtype = 'return'
except (ValueError,) as exc:
if ('k must be less than or equal'
' to the number of training points') in str(exc):
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': str(exc),
'duration': t_done - t_start,
}
rtype = 'return'
else:
rval = exc
rtype = 'raise'
except (AttributeError,) as exc:
print('Failing due to k_means_ weirdness')
if "'NoneType' object has no attribute 'copy'" in str(exc):
# -- sklearn/cluster/k_means_.py line 270 raises this sometimes
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': str(exc),
'duration': t_done - t_start,
}
rtype = 'return'
else:
rval = exc
rtype = 'raise'
except Exception as exc:
rval = exc
rtype = 'raise'
# -- return the result to calling process
_conn.send((rtype, rval))
class hyperopt_estimator(BaseEstimator):
def __init__(self,
preprocessing=None,
ex_preprocs=None,
classifier=None,
regressor=None,
space=None,
algo=None,
max_evals=10,
loss_fn=None,
continuous_loss_fn=False,
verbose=False,
trial_timeout=None,
fit_increment=1,
fit_increment_dump_filename=None,
seed=None,
use_partial_fit=False,
refit=True,
):
"""
Parameters
----------
preprocessing: pyll.Apply node, default is None
This should evaluate to a list of sklearn-style preprocessing
modules (may include hyperparameters). When None, a random
preprocessing module will be used.
ex_preprocs: pyll.Apply node, default is None
This should evaluate to a list of lists of sklearn-style
preprocessing modules for each exogenous dataset. When None, no
preprocessing will be applied to exogenous data.
classifier: pyll.Apply node
            This should evaluate to a sklearn-style classifier (may include
            hyperparameters).
        regressor: pyll.Apply node
            This should evaluate to a sklearn-style regressor (may include
hyperparameters).
algo: hyperopt suggest algo (e.g. rand.suggest)
max_evals: int
Fit() will evaluate up to this-many configurations. Does not apply
to fit_iter, which continues to search indefinitely.
loss_fn: callable
A function that takes the arguments (y_target, y_prediction)
and computes a loss value to be minimized. If no function is
specified, '1.0 - accuracy_score(y_target, y_prediction)' is used
for classification and '1.0 - r2_score(y_target, y_prediction)'
is used for regression
continuous_loss_fn: boolean, default is False
When true, the loss function is passed the output of
predict_proba() as the second argument. This is to facilitate the
use of continuous loss functions like cross entropy or AUC. When
false, the loss function is given the output of predict(). If
true, `classifier` and `loss_fn` must also be specified.
trial_timeout: float (seconds), or None for no timeout
Kill trial evaluations after this many seconds.
fit_increment: int
Every this-many trials will be a synchronization barrier for
ongoing trials, and the hyperopt Trials object may be
check-pointed. (Currently evaluations are done serially, but
that might easily change in future to allow e.g. MongoTrials)
fit_increment_dump_filename : str or None
Periodically dump self.trials to this file (via cPickle) during
fit() Saves after every `fit_increment` trial evaluations.
seed: numpy.random.RandomState or int or None
If int, the integer will be used to seed a RandomState instance
for use in hyperopt.fmin. Use None to make sure each run is
independent. Default is None.
use_partial_fit : boolean
If the learner support partial fit, it can be used for online
learning. However, the whole train set is not split into mini
batches here. The partial fit is used to iteratively update
parameters on the whole train set. Early stopping is used to kill
the training when the validation score stops improving.
refit: boolean, default True
Refit the best model on the whole data set.
"""
self.max_evals = max_evals
self.loss_fn = loss_fn
self.continuous_loss_fn = continuous_loss_fn
self.verbose = verbose
self.trial_timeout = trial_timeout
self.fit_increment = fit_increment
self.fit_increment_dump_filename = fit_increment_dump_filename
self.use_partial_fit = use_partial_fit
self.refit = refit
if space is None:
if classifier is None and regressor is None:
self.classification = True
classifier = components.any_classifier('classifier')
elif classifier is not None:
assert regressor is None
self.classification = True
else:
assert regressor is not None
self.classification = False
# classifier = components.any_classifier('classifier')
if preprocessing is None:
preprocessing = components.any_preprocessing('preprocessing')
else:
# assert isinstance(preprocessing, (list, tuple))
pass
if ex_preprocs is None:
ex_preprocs = []
else:
assert isinstance(ex_preprocs, (list, tuple))
# assert all(
# isinstance(pps, (list, tuple)) for pps in ex_preprocs
# )
self.n_ex_pps = len(ex_preprocs)
self.space = hyperopt.pyll.as_apply({
'classifier': classifier,
'regressor': regressor,
'preprocessing': preprocessing,
'ex_preprocs': ex_preprocs
})
else:
assert classifier is None
assert regressor is None
assert preprocessing is None
assert ex_preprocs is None
# self.space = hyperopt.pyll.as_apply(space)
self.space = space
evaled_space = space.eval()
if 'ex_preprocs' in evaled_space:
self.n_ex_pps = len(evaled_space['ex_preprocs'])
else:
self.n_ex_pps = 0
self.ex_preprocs = []
if algo is None:
self.algo = hyperopt.rand.suggest
else:
self.algo = algo
if seed is not None:
self.rstate = (np.random.RandomState(seed)
if isinstance(seed, int) else seed)
else:
self.rstate = np.random.RandomState()
# Backwards compatibility with older version of hyperopt
self.seed = seed
        if 'rstate' not in inspect.getfullargspec(hyperopt.fmin).args:
print("Warning: Using older version of hyperopt.fmin")
if self.continuous_loss_fn:
assert self.space['classifier'] is not None, \
"Can only use continuous_loss_fn with classifiers."
assert self.loss_fn is not None, \
"Must specify loss_fn if continuous_loss_fn is true."
def info(self, *args):
if self.verbose:
print(' '.join(map(str, args)))
def fit_iter(self, X, y, EX_list=None, valid_size=.2, n_folds=None,
cv_shuffle=False, warm_start=False,
random_state=np.random.RandomState(),
weights=None, increment=None):
"""Generator of Trials after ever-increasing numbers of evaluations
"""
assert weights is None
increment = self.fit_increment if increment is None else increment
# len does not work on sparse matrices, so using shape[0] instead
# shape[0] does not work on lists, so using len() for those
if scipy.sparse.issparse(X):
data_length = X.shape[0]
else:
data_length = len(X)
if type(X) is list:
X = np.array(X)
if type(y) is list:
y = np.array(y)
if not warm_start:
self.trials = hyperopt.Trials()
self._best_loss = float('inf')
else:
assert hasattr(self, 'trials')
# self._best_loss = float('inf')
# This is where the cost function is used.
fn = partial(_cost_fn,
X=X, y=y, EX_list=EX_list,
valid_size=valid_size, n_folds=n_folds,
shuffle=cv_shuffle, random_state=random_state,
use_partial_fit=self.use_partial_fit,
info=self.info,
timeout=self.trial_timeout,
loss_fn=self.loss_fn,
continuous_loss_fn=self.continuous_loss_fn)
# Wrap up the cost function as a process with timeout control.
def fn_with_timeout(*args, **kwargs):
conn1, conn2 = Pipe()
kwargs['_conn'] = conn2
th = Process(target=partial(fn, best_loss=self._best_loss),
args=args, kwargs=kwargs)
th.start()
if conn1.poll(self.trial_timeout):
fn_rval = conn1.recv()
th.join()
else:
self.info('TERMINATING DUE TO TIMEOUT')
th.terminate()
th.join()
fn_rval = 'return', {
'status': hyperopt.STATUS_FAIL,
'failure': 'TimeOut'
}
assert fn_rval[0] in ('raise', 'return')
if fn_rval[0] == 'raise':
raise fn_rval[1]
# -- remove potentially large objects from the rval
# so that the Trials() object below stays small
# We can recompute them if necessary, and it's usually
# not necessary at all.
if fn_rval[1]['status'] == hyperopt.STATUS_OK:
fn_loss = float(fn_rval[1].get('loss'))
fn_preprocs = fn_rval[1].pop('preprocs')
fn_ex_preprocs = fn_rval[1].pop('ex_preprocs')
fn_learner = fn_rval[1].pop('learner')
fn_iters = fn_rval[1].pop('iterations')
if fn_loss < self._best_loss:
self._best_preprocs = fn_preprocs
self._best_ex_preprocs = fn_ex_preprocs
self._best_learner = fn_learner
self._best_loss = fn_loss
self._best_iters = fn_iters
return fn_rval[1]
while True:
new_increment = yield self.trials
if new_increment is not None:
increment = new_increment
#FIXME: temporary workaround for rstate issue #35
# latest hyperopt.fmin() on master does not match PyPI
            if 'rstate' in inspect.getfullargspec(hyperopt.fmin).args:
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
rstate=self.rstate,
# -- let exceptions crash the program,
# so we notice them.
catch_eval_exceptions=False,
return_argmin=False, # -- in case no success so far
)
else:
if self.seed is None:
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
)
else:
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
rseed=self.seed,
)
def retrain_best_model_on_full_data(self, X, y, EX_list=None,
weights=None):
if EX_list is not None:
assert isinstance(EX_list, (list, tuple))
assert len(EX_list) == self.n_ex_pps
XEX = transform_combine_XEX(
X, self.info, en_pps=self._best_preprocs,
EXfit_list=EX_list, ex_pps_list=self._best_ex_preprocs
)
self.info('Training learner', self._best_learner,
'on X/EX of dimension', XEX.shape)
if hasattr(self._best_learner, 'partial_fit') and \
self.use_partial_fit:
self._best_learner, _ = pfit_until_convergence(
self._best_learner, self.classification, XEX, y, self.info,
max_iters=int(self._best_iters * retrain_fraction)
)
else:
self._best_learner.fit(XEX, y)
def fit(self, X, y, EX_list=None,
valid_size=.2, n_folds=None,
cv_shuffle=False, warm_start=False,
random_state=np.random.RandomState(),
weights=None):
"""
Search the space of learners and preprocessing steps for a good
predictive model of y <- X. Store the best model for predictions.
Args:
            EX_list ([list]): List of exogenous datasets. Each must have the
same number of samples as X.
valid_size ([float]): The portion of the dataset used as the
validation set. If cv_shuffle is False,
always use the last samples as validation.
n_folds ([int]): When n_folds is not None, use K-fold cross-
validation when n_folds > 2. Or, use leave-one-out
cross-validation when n_folds = -1.
            cv_shuffle ([boolean]): Whether to shuffle the samples before
                                    splitting the data into train and
                                    validation sets.
warm_start ([boolean]): If warm_start, the estimator will start
from an existing sequence of trials.
random_state: The random state used to seed the cross-validation
shuffling.
Notes:
For classification problems, will always use the stratified version
of the K-fold cross-validation or shuffle-and-split.
"""
if EX_list is not None:
assert isinstance(EX_list, (list, tuple))
assert len(EX_list) == self.n_ex_pps
filename = self.fit_increment_dump_filename
fit_iter = self.fit_iter(X, y, EX_list=EX_list,
valid_size=valid_size,
n_folds=n_folds,
cv_shuffle=cv_shuffle,
warm_start=warm_start,
random_state=random_state,
weights=weights,
increment=self.fit_increment)
next(fit_iter)
adjusted_max_evals = (self.max_evals if not warm_start else
len(self.trials.trials) + self.max_evals)
while len(self.trials.trials) < adjusted_max_evals:
try:
increment = min(self.fit_increment,
adjusted_max_evals - len(self.trials.trials))
fit_iter.send(increment)
if filename is not None:
with open(filename, 'wb') as dump_file:
self.info('---> dumping trials to', filename)
pickle.dump(self.trials, dump_file)
except KeyboardInterrupt:
break
if self.refit:
self.retrain_best_model_on_full_data(X, y, EX_list, weights)
def predict(self, X, EX_list=None):
"""
Use the best model found by previous fit() to make a prediction.
"""
if EX_list is not None:
assert isinstance(EX_list, (list, tuple))
assert len(EX_list) == self.n_ex_pps
# -- copy because otherwise np.utils.check_arrays sometimes does not
# produce a read-write view from read-only memory
if scipy.sparse.issparse(X):
X = scipy.sparse.csr_matrix(X)
else:
X = np.array(X)
XEX = transform_combine_XEX(
X, self.info, en_pps=self._best_preprocs,
EXfit_list=EX_list, ex_pps_list=self._best_ex_preprocs
)
return self._best_learner.predict(XEX)
def score(self, X, y, EX_list=None):
"""
Return the score (accuracy or R2) of the learner on
a given set of data
"""
if EX_list is not None:
assert isinstance(EX_list, (list, tuple))
assert len(EX_list) == self.n_ex_pps
# -- copy because otherwise np.utils.check_arrays sometimes does not
# produce a read-write view from read-only memory
if scipy.sparse.issparse(X):
X = scipy.sparse.csr_matrix(X)
else:
X = np.array(X)
XEX = transform_combine_XEX(
X, self.info, en_pps=self._best_preprocs,
EXfit_list=EX_list, ex_pps_list=self._best_ex_preprocs
)
return self._best_learner.score(XEX, y)
def best_model(self):
"""
Returns the best model found by the previous fit()
"""
return {'learner': self._best_learner,
'preprocs': self._best_preprocs,
'ex_preprocs': self._best_ex_preprocs}
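# Illustrative usage sketch (assumes scikit-learn >= 0.18 and its digits
# dataset; not executed on import).
def _example_usage():
    from hyperopt import tpe
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split

    X, y = load_digits(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)

    estim = hyperopt_estimator(
        classifier=components.any_classifier('clf'),
        algo=tpe.suggest,
        max_evals=10,
        trial_timeout=60,
    )
    estim.fit(X_train, y_train)
    print('test accuracy:', estim.score(X_test, y_test))
    print('best model:', estim.best_model())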
|
bot.py
|
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import generator_stop
from ast import literal_eval
from datetime import datetime
import itertools
import logging
import re
import signal
import threading
import time
from sopel import irc, logger, plugins, tools
from sopel.db import SopelDB
import sopel.loader
from sopel.module import NOLIMIT
from sopel.plugins import jobs as plugin_jobs, rules as plugin_rules
from sopel.tools import deprecated, Identifier
import sopel.tools.jobs
from sopel.trigger import Trigger
__all__ = ['Sopel', 'SopelWrapper']
LOGGER = logging.getLogger(__name__)
QUIT_SIGNALS = [
getattr(signal, name)
for name in ['SIGUSR1', 'SIGTERM', 'SIGINT']
if hasattr(signal, name)
]
RESTART_SIGNALS = [
getattr(signal, name)
for name in ['SIGUSR2', 'SIGILL']
if hasattr(signal, name)
]
SIGNALS = QUIT_SIGNALS + RESTART_SIGNALS
class Sopel(irc.AbstractBot):
def __init__(self, config, daemon=False):
super(Sopel, self).__init__(config)
self._daemon = daemon # Used for iPython. TODO something saner here
self.wantsrestart = False
self._running_triggers = []
self._running_triggers_lock = threading.Lock()
self._plugins = {}
self._rules_manager = plugin_rules.Manager()
self._scheduler = plugin_jobs.Scheduler(self)
self._url_callbacks = tools.SopelMemory()
"""Tracking of manually registered URL callbacks.
Should be manipulated only by use of :meth:`register_url_callback` and
:meth:`unregister_url_callback` methods, which are deprecated.
Remove in Sopel 9, along with the above related methods.
"""
self._times = {}
"""
A dictionary mapping lowercased nicks to dictionaries which map
function names to the time which they were last used by that nick.
"""
self.server_capabilities = {}
"""A dict mapping supported IRCv3 capabilities to their options.
For example, if the server specifies the capability ``sasl=EXTERNAL``,
it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
without any options will have ``None`` as the value.
        For servers that do not support IRCv3, this will be an empty dict.
"""
self.channels = tools.SopelIdentifierMemory()
"""A map of the channels that Sopel is in.
The keys are :class:`sopel.tools.Identifier`\\s of the channel names,
and map to :class:`sopel.tools.target.Channel` objects which contain
the users in the channel and their permissions.
"""
self.users = tools.SopelIdentifierMemory()
"""A map of the users that Sopel is aware of.
The keys are :class:`sopel.tools.Identifier`\\s of the nicknames, and
map to :class:`sopel.tools.target.User` instances. In order for Sopel
to be aware of a user, it must share at least one mutual channel.
"""
self.db = SopelDB(config)
"""The bot's database, as a :class:`sopel.db.SopelDB` instance."""
self.memory = tools.SopelMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
plugins. See :class:`sopel.tools.SopelMemory`.
"""
self.shutdown_methods = []
"""List of methods to call on shutdown."""
@property
def rules(self):
"""Rules manager."""
return self._rules_manager
@property
def scheduler(self):
"""Job Scheduler. See :func:`sopel.plugin.interval`."""
return self._scheduler
@property
def command_groups(self):
"""A mapping of plugin names to lists of their commands.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# This was supposed to be deprecated, but the built-in help plugin needs it
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
result = {}
for plugin, commands in plugin_commands:
if plugin not in result:
result[plugin] = list(sorted(commands.keys()))
else:
result[plugin].extend(commands.keys())
result[plugin] = list(sorted(result[plugin]))
return result
@property
def doc(self):
"""A dictionary of command names to their documentation.
Each command is mapped to its docstring and any available examples, if
declared in the plugin's code.
.. versionchanged:: 3.2
Use the first item in each callable's commands list as the key,
instead of the function name as declared in the source code.
.. versionchanged:: 7.1
This attribute is now generated on the fly from the registered list
of commands and nickname commands.
"""
# TODO: create a new, better, doc interface to remove it
plugin_commands = itertools.chain(
self._rules_manager.get_all_commands(),
self._rules_manager.get_all_nick_commands(),
)
commands = (
(command, command.get_doc(), command.get_usages())
for plugin, commands in plugin_commands
for command in commands.values()
)
return dict(
(name, (doc.splitlines(), [u['text'] for u in usages]))
for command, doc, usages in commands
for name in ((command.name,) + command.aliases)
)
@property
def hostmask(self):
"""The current hostmask for the bot :class:`sopel.tools.target.User`.
:return: the bot's current hostmask
:rtype: str
Bot must be connected and in at least one channel.
"""
if not self.users or self.nick not in self.users:
raise KeyError("'hostmask' not available: bot must be connected and in at least one channel.")
return self.users.get(self.nick).hostmask
def has_channel_privilege(self, channel, privilege):
"""Tell if the bot has a ``privilege`` level or above in a ``channel``.
:param str channel: a channel the bot is in
:param int privilege: privilege level to check
:raise ValueError: when the channel is unknown
This method checks the bot's privilege level in a channel, i.e. if it
has this level or higher privileges::
>>> bot.channels['#chan'].privileges[bot.nick] = plugin.OP
>>> bot.has_channel_privilege('#chan', plugin.VOICE)
True
The ``channel`` argument can be either a :class:`str` or a
:class:`sopel.tools.Identifier`, as long as Sopel joined said channel.
If the channel is unknown, a :exc:`ValueError` will be raised.
"""
if channel not in self.channels:
raise ValueError('Unknown channel %s' % channel)
return self.channels[channel].has_privilege(self.nick, privilege)
# signal handlers
def set_signal_handlers(self):
"""Set signal handlers for the bot.
Before running the bot, this method can be called from the main thread
        to set up signals. If the bot is connected, upon receiving a signal it
will send a ``QUIT`` message. Otherwise, it raises a
:exc:`KeyboardInterrupt` error.
.. note::
Per the Python documentation of :func:`signal.signal`:
When threads are enabled, this function can only be called from
the main thread; attempting to call it from other threads will
cause a :exc:`ValueError` exception to be raised.
"""
for obj in SIGNALS:
signal.signal(obj, self._signal_handler)
def _signal_handler(self, sig, frame):
if sig in QUIT_SIGNALS:
if self.backend.is_connected():
LOGGER.warning("Got quit signal, sending QUIT to server.")
self.quit('Closing')
else:
self.hasquit = True # mark the bot as "want to quit"
LOGGER.warning("Got quit signal.")
raise KeyboardInterrupt
elif sig in RESTART_SIGNALS:
if self.backend.is_connected():
LOGGER.warning("Got restart signal, sending QUIT to server.")
self.restart('Restarting')
else:
LOGGER.warning("Got restart signal.")
self.wantsrestart = True # mark the bot as "want to restart"
self.hasquit = True # mark the bot as "want to quit"
raise KeyboardInterrupt
# setup
def setup(self):
"""Set up Sopel bot before it can run.
The setup phase is in charge of:
* setting up logging (configure Python's built-in :mod:`logging`)
* setting up the bot's plugins (load, setup, and register)
* starting the job scheduler
"""
self.setup_logging()
self.setup_plugins()
self.post_setup()
def setup_logging(self):
"""Set up logging based on config options."""
logger.setup_logging(self.settings)
base_level = self.settings.core.logging_level or 'INFO'
base_format = self.settings.core.logging_format
base_datefmt = self.settings.core.logging_datefmt
# configure channel logging if required by configuration
if self.settings.core.logging_channel:
channel_level = self.settings.core.logging_channel_level or base_level
channel_format = self.settings.core.logging_channel_format or base_format
channel_datefmt = self.settings.core.logging_channel_datefmt or base_datefmt
channel_params = {}
if channel_format:
channel_params['fmt'] = channel_format
if channel_datefmt:
channel_params['datefmt'] = channel_datefmt
formatter = logger.ChannelOutputFormatter(**channel_params)
handler = logger.IrcLoggingHandler(self, channel_level)
handler.setFormatter(formatter)
# set channel handler to `sopel` logger
LOGGER = logging.getLogger('sopel')
LOGGER.addHandler(handler)
def setup_plugins(self):
"""Load plugins into the bot."""
load_success = 0
load_error = 0
load_disabled = 0
LOGGER.info("Loading plugins...")
usable_plugins = plugins.get_usable_plugins(self.settings)
for name, info in usable_plugins.items():
plugin, is_enabled = info
if not is_enabled:
load_disabled = load_disabled + 1
continue
try:
plugin.load()
except Exception as e:
load_error = load_error + 1
LOGGER.exception("Error loading %s: %s", name, e)
except SystemExit:
load_error = load_error + 1
LOGGER.exception(
"Error loading %s (plugin tried to exit)", name)
else:
try:
if plugin.has_setup():
plugin.setup(self)
plugin.register(self)
except Exception as e:
load_error = load_error + 1
LOGGER.exception("Error in %s setup: %s", name, e)
else:
load_success = load_success + 1
LOGGER.info("Plugin loaded: %s", name)
total = sum([load_success, load_error, load_disabled])
if total and load_success:
LOGGER.info(
"Registered %d plugins, %d failed, %d disabled",
(load_success - 1),
load_error,
load_disabled)
else:
LOGGER.warning("Warning: Couldn't load any plugins")
# post setup
def post_setup(self):
"""Perform post-setup actions.
This method handles everything that should happen after all the plugins
are loaded, and before the bot can connect to the IRC server.
At the moment, this method checks for undefined configuration options,
and starts the job scheduler.
.. versionadded:: 7.1
"""
settings = self.settings
for section_name, section in settings.get_defined_sections():
for option_name in settings.parser.options(section_name):
if not hasattr(section, option_name):
LOGGER.warning(
"Config option `%s.%s` is not defined by its section "
"and may not be recognized by Sopel.",
section_name,
option_name,
)
self._scheduler.start()
# plugins management
def reload_plugin(self, name):
"""Reload a plugin.
:param str name: name of the plugin to reload
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
This function runs the plugin's shutdown routine and unregisters the
plugin from the bot. Then this function reloads the plugin, runs its
setup routines, and registers it again.
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
plugin = self._plugins[name]
# tear down
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info("Unloaded plugin %s", name)
# reload & setup
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info("Reloaded %s plugin %s from %s",
meta['type'], name, meta['source'])
def reload_plugins(self):
"""Reload all registered plugins.
First, this function runs all plugin shutdown routines and unregisters
all plugins. Then it reloads all plugins, runs their setup routines, and
registers them again.
"""
registered = list(self._plugins.items())
# tear down all plugins
for name, plugin in registered:
plugin.shutdown(self)
plugin.unregister(self)
LOGGER.info("Unloaded plugin %s", name)
# reload & setup all plugins
for name, plugin in registered:
plugin.reload()
plugin.setup(self)
plugin.register(self)
meta = plugin.get_meta_description()
LOGGER.info("Reloaded %s plugin %s from %s",
meta['type'], name, meta['source'])
def add_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Add a loaded plugin to the bot's registry.
:param plugin: loaded plugin to add
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
self._plugins[plugin.name] = plugin
self.register_callables(callables)
self.register_jobs(jobs)
self.register_shutdowns(shutdowns)
self.register_urls(urls)
def remove_plugin(self, plugin, callables, jobs, shutdowns, urls):
"""Remove a loaded plugin from the bot's registry.
:param plugin: loaded plugin to remove
:type plugin: :class:`sopel.plugins.handlers.AbstractPluginHandler`
:param callables: an iterable of callables from the ``plugin``
:type callables: :term:`iterable`
:param jobs: an iterable of functions from the ``plugin`` that are
periodically invoked
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions from the ``plugin`` that
should be called on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions from the ``plugin`` to call when
matched against a URL
:type urls: :term:`iterable`
"""
name = plugin.name
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
# remove plugin rules, jobs, shutdown functions, and url callbacks
self._rules_manager.unregister_plugin(name)
self._scheduler.unregister_plugin(name)
self.unregister_shutdowns(shutdowns)
# remove plugin from registry
del self._plugins[name]
def has_plugin(self, name):
"""Check if the bot has registered a plugin of the specified name.
:param str name: name of the plugin to check for
:return: whether the bot has a plugin named ``name`` registered
:rtype: bool
"""
return name in self._plugins
def get_plugin_meta(self, name):
"""Get info about a registered plugin by its name.
:param str name: name of the plugin about which to get info
:return: the plugin's metadata
(see :meth:`~.plugins.handlers.AbstractPluginHandler.get_meta_description`)
:rtype: :class:`dict`
:raise plugins.exceptions.PluginNotRegistered: when there is no
``name`` plugin registered
"""
if not self.has_plugin(name):
raise plugins.exceptions.PluginNotRegistered(name)
return self._plugins[name].get_meta_description()
# callable management
@deprecated(
reason="Replaced by specific `unregister_*` methods.",
version='7.1',
removed_in='8.0')
def unregister(self, obj):
"""Unregister a shutdown method.
:param obj: the shutdown method to unregister
:type obj: :term:`object`
This method was used to unregister anything (rules, commands, urls,
jobs, and shutdown methods), but since everything can be done by other
means, there is no use for it anymore.
"""
callable_name = getattr(obj, "__name__", 'UNKNOWN')
if hasattr(obj, 'interval'):
self.unregister_jobs([obj])
if callable_name == "shutdown" and obj in self.shutdown_methods:
self.unregister_shutdowns([obj])
@deprecated(
reason="Replaced by specific `register_*` methods.",
version='7.1',
removed_in='8.0')
def register(self, callables, jobs, shutdowns, urls):
"""Register rules, jobs, shutdown methods, and URL callbacks.
:param callables: an iterable of callables to register
:type callables: :term:`iterable`
:param jobs: an iterable of functions to periodically invoke
:type jobs: :term:`iterable`
:param shutdowns: an iterable of functions to call on shutdown
:type shutdowns: :term:`iterable`
:param urls: an iterable of functions to call when matched against a URL
:type urls: :term:`iterable`
The ``callables`` argument contains a list of "callable objects", i.e.
objects for which :func:`callable` will return ``True``. They can be:
* a callable with rules (will match triggers with a regex pattern)
* a callable without rules (will match any triggers, such as events)
* a callable with commands
* a callable with nick commands
* a callable with action commands
It is possible to have a callable with rules, commands, and nick
commands configured. It should not be possible to have a callable with
commands or nick commands but without rules.
"""
self.register_callables(callables)
self.register_jobs(jobs)
self.register_shutdowns(shutdowns)
self.register_urls(urls)
def register_callables(self, callables):
match_any = re.compile(r'.*')
settings = self.settings
for callbl in callables:
rules = getattr(callbl, 'rule', [])
lazy_rules = getattr(callbl, 'rule_lazy_loaders', [])
find_rules = getattr(callbl, 'find_rules', [])
lazy_find_rules = getattr(callbl, 'find_rules_lazy_loaders', [])
search_rules = getattr(callbl, 'search_rules', [])
lazy_search_rules = getattr(callbl, 'search_rules_lazy_loaders', [])
commands = getattr(callbl, 'commands', [])
nick_commands = getattr(callbl, 'nickname_commands', [])
action_commands = getattr(callbl, 'action_commands', [])
is_rule = any([
rules,
lazy_rules,
find_rules,
lazy_find_rules,
search_rules,
lazy_search_rules,
])
is_command = any([commands, nick_commands, action_commands])
if rules:
rule = plugin_rules.Rule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_rules:
try:
rule = plugin_rules.Rule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register rule: %s', err)
if find_rules:
rule = plugin_rules.FindRule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_find_rules:
try:
rule = plugin_rules.FindRule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register find rule: %s', err)
if search_rules:
rule = plugin_rules.SearchRule.from_callable(settings, callbl)
self._rules_manager.register(rule)
if lazy_search_rules:
try:
rule = plugin_rules.SearchRule.from_callable_lazy(
settings, callbl)
self._rules_manager.register(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error('Cannot register search rule: %s', err)
if commands:
rule = plugin_rules.Command.from_callable(settings, callbl)
self._rules_manager.register_command(rule)
if nick_commands:
rule = plugin_rules.NickCommand.from_callable(
settings, callbl)
self._rules_manager.register_nick_command(rule)
if action_commands:
rule = plugin_rules.ActionCommand.from_callable(
settings, callbl)
self._rules_manager.register_action_command(rule)
if not is_command and not is_rule:
callbl.rule = [match_any]
self._rules_manager.register(
plugin_rules.Rule.from_callable(self.settings, callbl))
def register_jobs(self, jobs):
for func in jobs:
job = sopel.tools.jobs.Job.from_callable(self.settings, func)
self._scheduler.register(job)
def unregister_jobs(self, jobs):
for job in jobs:
self._scheduler.remove_callable_job(job)
def register_shutdowns(self, shutdowns):
# Append plugin's shutdown function to the bot's list of functions to
# call on shutdown
self.shutdown_methods = self.shutdown_methods + list(shutdowns)
def unregister_shutdowns(self, shutdowns):
self.shutdown_methods = [
shutdown
for shutdown in self.shutdown_methods
if shutdown not in shutdowns
]
def register_urls(self, urls):
for func in urls:
url_regex = getattr(func, 'url_regex', [])
url_lazy_loaders = getattr(func, 'url_lazy_loaders', None)
if url_regex:
rule = plugin_rules.URLCallback.from_callable(
self.settings, func)
self._rules_manager.register_url_callback(rule)
if url_lazy_loaders:
try:
rule = plugin_rules.URLCallback.from_callable_lazy(
self.settings, func)
self._rules_manager.register_url_callback(rule)
except plugins.exceptions.PluginError as err:
LOGGER.error("Cannot register URL callback: %s", err)
@deprecated(
reason="Replaced by `say` method.",
version='6.0',
removed_in='8.0')
def msg(self, recipient, text, max_messages=1):
"""Old way to make the bot say something on IRC.
:param str recipient: nickname or channel to which to send message
:param str text: message to send
:param int max_messages: split ``text`` into at most this many messages
if it is too long to fit in one (optional)
.. deprecated:: 6.0
Use :meth:`say` instead. Will be removed in Sopel 8.
"""
self.say(text, recipient, max_messages)
# message dispatch
def call_rule(self, rule, sopel, trigger):
# rate limiting
if not trigger.admin and not rule.is_unblockable():
if rule.is_rate_limited(trigger.nick):
return
if not trigger.is_privmsg and rule.is_channel_rate_limited(trigger.sender):
return
if rule.is_global_rate_limited():
return
# channel config
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
if '*' in disabled_plugins:
return
elif rule.get_plugin_name() in disabled_plugins:
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
disabled_commands = disabled_commands.get(rule.get_plugin_name(), [])
if rule.get_rule_label() in disabled_commands:
return
try:
rule.execute(sopel, trigger)
except KeyboardInterrupt:
raise
except Exception as error:
self.error(trigger, exception=error)
def call(self, func, sopel, trigger):
"""Call a function, applying any rate limits or other restrictions.
:param func: the function to call
:type func: :term:`function`
:param sopel: a SopelWrapper instance
:type sopel: :class:`SopelWrapper`
:param Trigger trigger: the Trigger object for the line from the server
that triggered this call
"""
nick = trigger.nick
current_time = time.time()
if nick not in self._times:
self._times[nick] = dict()
if self.nick not in self._times:
self._times[self.nick] = dict()
if not trigger.is_privmsg and trigger.sender not in self._times:
self._times[trigger.sender] = dict()
if not trigger.admin and not func.unblockable:
if func in self._times[nick]:
usertimediff = current_time - self._times[nick][func]
if func.rate > 0 and usertimediff < func.rate:
LOGGER.info(
"%s prevented from using %s in %s due to user limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, usertimediff,
func.rate
)
return
if func in self._times[self.nick]:
globaltimediff = current_time - self._times[self.nick][func]
if func.global_rate > 0 and globaltimediff < func.global_rate:
LOGGER.info(
"%s prevented from using %s in %s due to global limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, globaltimediff,
func.global_rate
)
return
if not trigger.is_privmsg and func in self._times[trigger.sender]:
chantimediff = current_time - self._times[trigger.sender][func]
if func.channel_rate > 0 and chantimediff < func.channel_rate:
LOGGER.info(
"%s prevented from using %s in %s due to channel limit: %d < %d",
trigger.nick, func.__name__, trigger.sender, chantimediff,
func.channel_rate
)
return
# if channel has its own config section, check for excluded plugins/plugin methods
if trigger.sender in self.config:
channel_config = self.config[trigger.sender]
LOGGER.debug(
"Evaluating configuration for %s.%s in channel %s",
func.plugin_name, func.__name__, trigger.sender
)
# disable listed plugins completely on provided channel
if 'disable_plugins' in channel_config:
disabled_plugins = channel_config.disable_plugins.split(',')
# if "*" is used, we are disabling all plugins on provided channel
if '*' in disabled_plugins:
LOGGER.debug(
"All plugins disabled in %s; skipping execution of %s.%s",
trigger.sender, func.plugin_name, func.__name__
)
return
if func.plugin_name in disabled_plugins:
LOGGER.debug(
"Plugin %s is disabled in %s; skipping execution of %s",
func.plugin_name, trigger.sender, func.__name__
)
return
# disable chosen methods from plugins
if 'disable_commands' in channel_config:
disabled_commands = literal_eval(channel_config.disable_commands)
if func.plugin_name in disabled_commands:
if func.__name__ in disabled_commands[func.plugin_name]:
LOGGER.debug(
"Skipping execution of %s.%s in %s: disabled_commands matched",
func.plugin_name, func.__name__, trigger.sender
)
return
try:
exit_code = func(sopel, trigger)
except Exception as error: # TODO: Be specific
exit_code = None
self.error(trigger, exception=error)
if exit_code != NOLIMIT:
self._times[nick][func] = current_time
self._times[self.nick][func] = current_time
if not trigger.is_privmsg:
self._times[trigger.sender][func] = current_time
def _is_pretrigger_blocked(self, pretrigger):
if self.settings.core.nick_blocks or self.settings.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
return (nick_blocked, host_blocked)
def dispatch(self, pretrigger):
"""Dispatch a parsed message to any registered callables.
:param pretrigger: a parsed message from the server
:type pretrigger: :class:`~sopel.trigger.PreTrigger`
The ``pretrigger`` (a parsed message) is used to find matching rules;
it will retrieve them by order of priority, and execute them. It runs
triggered rules in separate threads, unless they are marked otherwise.
        However, blockable rules are not run at all when the triggering
        nickname or hostname is blocked.
.. seealso::
The pattern matching is done by the
:class:`Rules Manager<sopel.plugins.rules.Manager>`.
"""
# list of commands running in separate threads for this dispatch
running_triggers = []
# nickname/hostname blocking
nick_blocked, host_blocked = self._is_pretrigger_blocked(pretrigger)
blocked = bool(nick_blocked or host_blocked)
list_of_blocked_rules = set()
# account info
nick = pretrigger.nick
user_obj = self.users.get(nick)
account = user_obj.account if user_obj else None
for rule, match in self._rules_manager.get_triggered_rules(self, pretrigger):
trigger = Trigger(self.settings, pretrigger, match, account)
is_unblockable = trigger.admin or rule.is_unblockable()
if blocked and not is_unblockable:
list_of_blocked_rules.add(str(rule))
continue
wrapper = SopelWrapper(
self, trigger, output_prefix=rule.get_output_prefix())
if rule.is_threaded():
# run in a separate thread
targs = (rule, wrapper, trigger)
t = threading.Thread(target=self.call_rule, args=targs)
plugin_name = rule.get_plugin_name()
rule_label = rule.get_rule_label()
t.name = '%s-%s-%s' % (t.name, plugin_name, rule_label)
t.start()
running_triggers.append(t)
else:
# direct call
self.call_rule(rule, wrapper, trigger)
# update currently running triggers
self._update_running_triggers(running_triggers)
if list_of_blocked_rules:
if nick_blocked and host_blocked:
block_type = 'both blocklists'
elif nick_blocked:
block_type = 'nick blocklist'
else:
block_type = 'host blocklist'
LOGGER.debug(
"%s prevented from using %s by %s.",
pretrigger.nick,
', '.join(list_of_blocked_rules),
block_type,
)
@property
def running_triggers(self):
"""Current active threads for triggers.
:return: the running thread(s) currently processing trigger(s)
:rtype: :term:`iterable`
This is for testing and debugging purposes only.
"""
with self._running_triggers_lock:
return [t for t in self._running_triggers if t.is_alive()]
def _update_running_triggers(self, running_triggers):
"""Update list of running triggers.
:param list running_triggers: newly started threads
We want to keep track of running triggers, mostly for testing and
debugging purposes. For instance, it'll help make sure, in tests, that
a bot plugin has finished processing a trigger, by manually joining
all running threads.
This is kept private, as it's purely internal machinery and isn't
meant to be manipulated by outside code.
"""
# update bot's global running triggers
with self._running_triggers_lock:
running_triggers = running_triggers + self._running_triggers
self._running_triggers = [
t for t in running_triggers if t.is_alive()]
# event handlers
def on_scheduler_error(self, scheduler, exc):
"""Called when the Job Scheduler fails.
:param scheduler: the job scheduler that errored
:type scheduler: :class:`sopel.plugins.jobs.Scheduler`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def on_job_error(self, scheduler, job, exc):
"""Called when a job from the Job Scheduler fails.
:param scheduler: the job scheduler responsible for the errored ``job``
:type scheduler: :class:`sopel.plugins.jobs.Scheduler`
:param job: the Job that errored
:type job: :class:`sopel.tools.jobs.Job`
:param Exception exc: the raised exception
.. seealso::
:meth:`Sopel.error`
"""
self.error(exception=exc)
def error(self, trigger=None, exception=None):
"""Called internally when a plugin causes an error.
:param trigger: the ``Trigger``\\ing line (if available)
:type trigger: :class:`sopel.trigger.Trigger`
:param Exception exception: the exception raised by the error (if
available)
"""
message = 'Unexpected error'
if exception:
message = '{} ({})'.format(message, exception)
if trigger:
message = '{} from {} at {}. Message was: {}'.format(
message, trigger.nick, str(datetime.utcnow()), trigger.group(0)
)
LOGGER.exception(message)
if trigger and self.settings.core.reply_errors and trigger.sender is not None:
self.say(message, trigger.sender)
def _host_blocked(self, host):
"""Check if a hostname is blocked.
:param str host: the hostname to check
"""
bad_masks = self.config.core.host_blocks
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
"""Check if a nickname is blocked.
:param str nick: the nickname to check
"""
bad_nicks = self.config.core.nick_blocks
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
"""Internal bot shutdown method."""
LOGGER.info("Shutting down")
# Stop Job Scheduler
LOGGER.info("Stopping the Job Scheduler.")
self._scheduler.stop()
try:
self._scheduler.join(timeout=15)
except RuntimeError:
LOGGER.exception("Unable to stop the Job Scheduler.")
else:
LOGGER.info("Job Scheduler stopped.")
self._scheduler.clear_jobs()
# Shutdown plugins
LOGGER.info(
"Calling shutdown for %d plugins.", len(self.shutdown_methods))
for shutdown_method in self.shutdown_methods:
try:
LOGGER.debug(
"Calling %s.%s",
shutdown_method.__module__,
shutdown_method.__name__)
shutdown_method(self)
except Exception as e:
LOGGER.exception("Error calling shutdown method: %s", e)
# Avoid calling shutdown methods if we already have.
self.shutdown_methods = []
# URL callbacks management
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='7.1',
warning_in='8.0',
removed_in='9.0',
)
def register_url_callback(self, pattern, callback):
"""Register a ``callback`` for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to register
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to handle matching URLs
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``setup()``::
if 'url_callbacks' not in bot.memory:
bot.memory['url_callbacks'] = tools.SopelMemory()
regex = re.compile(r'http://example.com/path/.*')
bot.memory['url_callbacks'][regex] = callback
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.register_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.plugin.url`.
.. deprecated:: 7.1
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. versionchanged:: 8.0
Stores registered callbacks in an internal property instead of
``bot.memory['url_callbacks']``.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
self._url_callbacks[pattern] = callback
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='7.1',
warning_in='8.0',
removed_in='9.0',
)
def unregister_url_callback(self, pattern, callback):
"""Unregister the callback for URLs matching the regex ``pattern``.
:param pattern: compiled regex pattern to unregister callback
:type pattern: :ref:`re.Pattern <python:re-objects>`
:param callback: callable object to remove
:type callback: :term:`function`
.. versionadded:: 7.0
This method replaces manual management of ``url_callbacks`` in
Sopel's plugins, so instead of doing this in ``shutdown()``::
regex = re.compile(r'http://example.com/path/.*')
try:
del bot.memory['url_callbacks'][regex]
except KeyError:
pass
use this much more concise pattern::
regex = re.compile(r'http://example.com/path/.*')
bot.unregister_url_callback(regex, callback)
It's recommended you completely avoid manual management of URL
callbacks through the use of :func:`sopel.plugin.url`.
.. deprecated:: 7.1
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. versionchanged:: 8.0
Deletes registered callbacks from an internal property instead of
``bot.memory['url_callbacks']``.
"""
if isinstance(pattern, str):
pattern = re.compile(pattern)
try:
del self._url_callbacks[pattern]
except KeyError:
pass
@deprecated(
reason='Issues with @url decorator have been fixed. Simply use that.',
version='8.0',
removed_in='9.0',
)
def search_url_callbacks(self, url):
"""Yield callbacks whose regex pattern matches the ``url``.
:param str url: URL found in a trigger
:return: yield 2-value tuples of ``(callback, match)``
For each pattern that matches the ``url`` parameter, it yields a
2-value tuple of ``(callable, match)`` for that pattern.
The ``callable`` is the one registered with
:meth:`register_url_callback`, and the ``match`` is the result of
the regex pattern's ``search`` method.
.. versionadded:: 7.0
.. versionchanged:: 8.0
Searches for registered callbacks in an internal property instead
of ``bot.memory['url_callbacks']``.
.. deprecated:: 8.0
Made obsolete by fixes to the behavior of
:func:`sopel.plugin.url`. Will be removed in Sopel 9.
.. seealso::
The Python documentation for the `re.search`__ function and
the `match object`__.
.. __: https://docs.python.org/3.6/library/re.html#re.search
.. __: https://docs.python.org/3.6/library/re.html#match-objects
"""
for regex, function in self._url_callbacks.items():
match = regex.search(url)
if match:
yield function, match
def restart(self, message):
"""Disconnect from IRC and restart the bot.
:param str message: QUIT message to send (e.g. "Be right back!")
"""
self.wantsrestart = True
self.quit(message)
class SopelWrapper(object):
"""Wrapper around a Sopel instance and a Trigger.
:param sopel: Sopel instance
:type sopel: :class:`~sopel.bot.Sopel`
:param trigger: IRC Trigger line
:type trigger: :class:`~sopel.trigger.Trigger`
:param str output_prefix: prefix for messages sent through this wrapper
(e.g. plugin tag)
This wrapper will be used to call Sopel's triggered commands and rules as
their ``bot`` argument. It acts as a proxy to :meth:`send messages<say>`
to the sender (either a channel or in a private message) and even to
:meth:`reply to someone<reply>` in a channel.
"""
def __init__(self, sopel, trigger, output_prefix=''):
if not output_prefix:
# Just in case someone passes in False, None, etc.
output_prefix = ''
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', sopel)
object.__setattr__(self, '_trigger', trigger)
object.__setattr__(self, '_out_pfx', output_prefix)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def say(self, message, destination=None, max_messages=1, truncation='', trailing=''):
"""Override ``Sopel.say`` to use trigger source by default.
:param str message: message to say
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param int max_messages: split ``message`` into at most this many
messages if it is too long to fit into one
line (optional)
:param str truncation: string to indicate that the ``message`` was
truncated (optional)
:param str trailing: string that should always appear at the end of
``message`` (optional)
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
For more details about the optional arguments to this wrapper
method, consult the documentation for :meth:`sopel.bot.Sopel.say`.
"""
if destination is None:
destination = self._trigger.sender
self._bot.say(self._out_pfx + message, destination, max_messages, truncation, trailing)
def action(self, message, destination=None):
"""Override ``Sopel.action`` to use trigger source by default.
:param str message: action message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.action`
"""
if destination is None:
destination = self._trigger.sender
self._bot.action(message, destination)
def notice(self, message, destination=None):
"""Override ``Sopel.notice`` to use trigger source by default.
:param str message: notice message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
.. seealso::
:meth:`sopel.bot.Sopel.notice`
"""
if destination is None:
destination = self._trigger.sender
self._bot.notice(self._out_pfx + message, destination)
def reply(self, message, destination=None, reply_to=None, notice=False):
"""Override ``Sopel.reply`` to ``reply_to`` sender by default.
:param str message: reply message
:param str destination: channel or nickname; defaults to
:attr:`trigger.sender <sopel.trigger.Trigger.sender>`
:param str reply_to: person to reply to; defaults to
:attr:`trigger.nick <sopel.trigger.Trigger.nick>`
:param bool notice: reply as an IRC notice or with a simple message
The ``destination`` will default to the channel in which the
trigger happened (or nickname, if received in a private message).
``reply_to`` will default to the nickname who sent the trigger.
.. seealso::
:meth:`sopel.bot.Sopel.reply`
"""
if destination is None:
destination = self._trigger.sender
if reply_to is None:
reply_to = self._trigger.nick
self._bot.reply(message, destination, reply_to, notice)
def kick(self, nick, channel=None, message=None):
"""Override ``Sopel.kick`` to kick in a channel
:param str nick: nick to kick out of the ``channel``
:param str channel: optional channel to kick ``nick`` from
:param str message: optional message for the kick
The ``channel`` will default to the channel in which the call was
triggered. If triggered from a private message, ``channel`` is
required.
.. seealso::
:meth:`sopel.bot.Sopel.kick`
"""
if channel is None:
if self._trigger.is_privmsg:
raise RuntimeError('Error: KICK requires a channel.')
else:
channel = self._trigger.sender
if nick is None:
raise RuntimeError('Error: KICK requires a nick.')
self._bot.kick(nick, channel, message)
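# --- Hedged illustration (not part of the original bot.py) ---
# How a plugin callable ends up talking to this module: dispatch() wraps the
# running bot in a SopelWrapper and passes it as the ``bot`` argument of the
# triggered rule, so say()/reply()/action() default to the trigger's channel.
# A minimal plugin sketch, assuming the usual ``sopel.plugin.commands``
# decorator:
#
#     from sopel import plugin
#
#     @plugin.commands('hello')
#     def hello(bot, trigger):
#         # ``bot`` is a SopelWrapper around the Sopel instance
#         bot.reply('hi!')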
|
main.py
|
import webbrowser, threading
from threading import Thread
def setup():
    # URLs to open: the video and the accompanying GIF.
    video = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab'
    gif = 'https://c.tenor.com/CWgfFh7ozHkAAAAC/rick-astly-rick-rolled.gif'
    # Open each URL once in the default browser.
    for url in (video, gif):
        webbrowser.open(url)
# Only start new threads while the total thread count stays bounded.
if threading.active_count() <= 500:
    Thread(target=setup).start()
    Thread(target=setup).start()
|
worker_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import unittest, with_config, skipOnTravis, LuigiTestCase
import luigi.notifications
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import CentralPlannerScheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi import six
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
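# --- Hedged illustration of Luigi's dynamic dependencies (not one of the original tests) ---
# Yielding a task (or list of tasks) from run(), as DynamicRequires does above,
# suspends the task, lets the worker schedule the yielded requirements, and
# resumes run() with their output targets once they are complete.
# _ExampleFetch/_ExampleSummarize are illustration-only names.
class _ExampleFetch(luigi.Task):
    path = luigi.Parameter()
    def output(self):
        return luigi.LocalTarget(self.path)
    def run(self):
        with self.output().open('w') as f:
            f.write('fetched\n')
class _ExampleSummarize(luigi.Task):
    path = luigi.Parameter()
    def output(self):
        return luigi.LocalTarget(self.path + '.summary')
    def run(self):
        # The yielded dependency is scheduled on demand; its LocalTarget is
        # sent back into this generator once it has finished.
        fetched = yield _ExampleFetch(path=self.path)
        with fetched.open('r') as src, self.output().open('w') as dst:
            dst.write(src.read().upper())
# Example usage (not executed here):
#     luigi.build([_ExampleSummarize(path='/tmp/example')], local_scheduler=True)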
class WorkerTest(unittest.TestCase):
def run(self, result=None):
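        # Wrap every test in a fresh scheduler plus two workers (self.w and
        # self.w2), and restore time.time afterwards in case a test replaced
        # it through setTime().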
self.sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.time = time.time
with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
self.w = w
self.w2 = w2
super(WorkerTest, self).run(result)
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_stop_getting_new_work(self):
d = DummyTask()
self.w.add(d)
self.assertFalse(d.complete())
try:
self.w.handle_interrupt(signal.SIGUSR1, None)
except AttributeError:
raise unittest.SkipTest('signal.SIGUSR1 not found on this system')
self.w.run()
self.assertFalse(d.complete())
def test_disabled_shutdown_hook(self):
w = Worker(scheduler=self.sch, keep_alive=True, no_install_shutdown_handler=True)
with w:
try:
# try to kill the worker!
os.kill(os.getpid(), signal.SIGUSR1)
except AttributeError:
raise unittest.SkipTest('signal.SIGUSR1 not found on this system')
# try to kill the worker... AGAIN!
t = SuicidalWorker(signal.SIGUSR1)
w.add(t)
w.run()
# task should have stepped away from the ledge, and completed successfully despite all the SIGUSR1 signals
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [t.task_id])
@with_config({"worker": {"no_install_shutdown_handler": "True"}})
def test_can_run_luigi_in_thread(self):
class A(DummyTask):
pass
task = A()
# Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
# So if we do not disable the shutdown handler, this would fail.
t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
t.start()
t.join()
self.assertTrue(task.complete())
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(A):
def requires(self):
return luigi.task.externalize(a)
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_legacy_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
a.run = NotImplemented
class B(A):
def requires(self):
return a
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_tracking_url_deprecated(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self, tracking_url_callback=None):
if tracking_url_callback is not None:
tracking_url_callback(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_type_error_in_tracking_run_deprecated(self):
class A(Task):
num_runs = 0
def complete(self):
return False
def run(self, tracking_url_callback=None):
self.num_runs += 1
raise TypeError('bad type')
a = A()
self.assertTrue(self.w.add(a))
self.assertFalse(self.w.run())
# Should only run and fail once, not retry because of the type error
self.assertEqual(1, a.num_runs)
def test_tracking_url(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self):
self.set_tracking_url(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_fail(self):
class CustomException(BaseException):
def __init__(self, msg):
self.msg = msg
class A(Task):
def run(self):
self.has_run = True
raise CustomException('bad things')
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertFalse(self.w.run())
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see central_planner_test.CentralPlannerTest.test_remove_dep
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.assertTrue(self.w.add(b_a))
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.assertTrue(self.w2.add(b_c))
self.assertFalse(self.w.run()) # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_unfulfilled_dep(self):
class A(Task):
def complete(self):
return self.done
def run(self):
self.done = True
def get_b(a):
class B(A):
def requires(self):
return a
b = B()
b.done = False
a.done = True
return b
a = A()
b = get_b(a)
self.assertTrue(self.w.add(b))
a.done = False
self.w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_gets_missed_work(self):
class A(Task):
done = False
def complete(self):
return self.done
def run(self):
self.done = True
a = A()
self.assertTrue(self.w.add(a))
# simulate a missed get_work response
self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
self.assertTrue(self.w.run())
self.assertTrue(a.complete())
def test_avoid_infinite_reschedule(self):
class A(Task):
def complete(self):
return False
class B(Task):
def complete(self):
return False
def requires(self):
return A()
self.assertTrue(self.w.add(B()))
self.assertFalse(self.w.run())
def test_fails_registering_signal(self):
with mock.patch('luigi.worker.signal', spec=['signal']):
# mock will raise an attribute error getting signal.SIGUSR1
Worker()
def test_allow_reschedule_with_many_missing_deps(self):
class A(Task):
""" Task that must run twice to succeed """
i = luigi.IntParameter()
runs = 0
def complete(self):
return self.runs >= 2
def run(self):
self.runs += 1
class B(Task):
done = False
def requires(self):
return map(A, range(20))
def complete(self):
return self.done
def run(self):
self.done = True
b = B()
w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
self.assertTrue(w.add(b))
self.assertFalse(w.run())
# For b to be done, we must have rescheduled its dependencies to run them twice
self.assertTrue(b.complete())
self.assertTrue(all(a.complete() for a in b.deps()))
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
class ExternalB(ExternalTask):
task_family = "B"
def complete(self):
return False
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(eb))
logging.debug("RUNNING BROKEN WORKER")
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
self.assertTrue(w.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
class ExternalB(ExternalTask):
task_family = "B"
def complete(self):
return False
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w2.add(eb))
self.assertTrue(w.add(b))
self.assertTrue(w2.run())
self.assertFalse(b.complete())
self.assertTrue(w.run())
self.assertTrue(b.complete())
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1) as w2:
self.assertTrue(w.add(a))
self.assertTrue(w2.add(b))
threading.Thread(target=w.run).start()
self.assertTrue(w2.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_die_for_non_unique_pending(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1) as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(b))
self.assertEqual(w._get_work()[0], a.task_id)
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
def test_requires_exception(self):
class A(DummyTask):
def requires(self):
raise Exception("doh")
a = A()
class D(DummyTask):
pass
d = D()
class C(DummyTask):
def requires(self):
return d
c = C()
class B(DummyTask):
def requires(self):
return c, a
b = B()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertTrue(d.has_run)
self.assertFalse(a.has_run)
class DynamicDependenciesTest(unittest.TestCase):
n_workers = 1
timeout = float('inf')
def setUp(self):
self.p = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.p)
def test_dynamic_dependencies(self, use_banana_task=False):
t0 = time.time()
t = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
# loop through output and verify
f = t.output().open('r')
for i in range(7):
self.assertEqual(f.readline().strip(), '%d: Done!' % i)
self.assertTrue(time.time() - t0 < self.timeout)
def test_dynamic_dependencies_with_namespace(self):
self.test_dynamic_dependencies(use_banana_task=True)
def test_dynamic_dependencies_other_module(self):
t = DynamicRequiresOtherModule(p=self.p)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
n_workers = 100
timeout = 3.0 # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = CentralPlannerScheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
with Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
):
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
with Worker(ping_interval=0.01) as w:
self.assertTrue(w._keep_alive_thread.is_alive())
self.assertFalse(w._keep_alive_thread.is_alive())
def email_patch(test_func, email_config=None):
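    # Decorator: patches smtplib.SMTP and forces email sending so that any
    # error emails produced while the wrapped test runs are collected into the
    # ``emails`` list that is passed to the test function.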
EMAIL_CONFIG = {"core": {"error-email": "not-a-real-email-address-for-test-only"}, "email": {"force-send": "true"}}
if email_config is not None:
EMAIL_CONFIG.update(email_config)
emails = []
def mock_send_email(sender, recipients, msg):
emails.append(msg)
@with_config(EMAIL_CONFIG)
@functools.wraps(test_func)
@mock.patch('smtplib.SMTP')
def run_test(self, smtp):
smtp().sendmail.side_effect = mock_send_email
test_func(self, emails)
return run_test
def custom_email_patch(config):
return functools.partial(email_patch, email_config=config)
class WorkerEmailTest(LuigiTestCase):
def run(self, result=None):
super(WorkerEmailTest, self).setUp()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as self.worker:
super(WorkerEmailTest, self).run(result)
@email_patch
def test_connection_error(self, emails):
sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
with Worker(scheduler=sch) as worker:
try:
worker.add(a)
except RPCError:
self.assertEqual(self.waits, 2) # should attempt to add it 3 times
self.assertNotEqual(emails, [])
self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
else:
self.fail()
@email_patch
def test_complete_error(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@email_patch
def test_requires_error(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertFalse(a.has_run)
@email_patch
def test_complete_return_value(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@email_patch
def test_task_process_dies(self, emails):
a = SuicidalWorker(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("died unexpectedly with exit code -9") != -1)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.00001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("timed out and was terminated.") != -1)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"core": {"error-email": "not-a-real-email-address-for-test-only", 'email-type': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SuicidalWorker(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HungWorker(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
# This passes under python3 when run as `nosetests test/worker_test.py`
# but not as `nosetests test`. Probably a side effect from previous tests.
@unittest.skipIf(six.PY3, 'This test fails on python3 when run with tox.')
def test_multiple_workers(self):
# Test using multiple workers
# Also test generating classes dynamically, since this may reveal issues with
# how multiprocessing is implemented on various platforms. If it's using os.fork
# under the hood it should be fine, but dynamic classes can't be pickled, so
# other implementations of multiprocessing (using spawn etc.) may fail
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
self.assertTrue(time.time() < t0 + 5.0) # should ideally take about 0.1s, but definitely less than 5.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SuicidalWorker(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SuicidalWorker(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SuicidalWorker(signal.SIGTERM)
t2 = SuicidalWorker(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HungWorker()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
def test_time_out_hung_worker(self):
luigi.build([HungWorker(0.1)], workers=2, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HungWorker()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HungWorker(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
class NotImportedTask(luigi.Task):
task_family = 'UnimportedTask'
task_module = None
task = NotImportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
task.task_module = 'dummy_test_module.not_imported'
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, )) # ehm for some weird reason [0] becomes a tuple...?
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'core': {'worker-task-limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'core': {'worker-task-limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
Ensure that keyboard interrupts causes luigi to quit when you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
|
go_tool.py
|
from __future__ import absolute_import
import argparse
import copy
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import six
from functools import reduce
arc_project_prefix = 'a.yandex-team.ru/'
std_lib_prefix = 'contrib/go/_std/src/'
vendor_prefix = 'vendor/'
vet_info_ext = '.vet.out'
vet_report_ext = '.vet.txt'
FIXED_CGO1_SUFFIX='.fixed.cgo1.go'
COMPILE_OPTIMIZATION_FLAGS=('-N',)
def get_trimpath_args(args):
return ['-trimpath', args.trimpath] if args.trimpath else []
def preprocess_cgo1(src_path, dst_path, source_root):
with open(src_path, 'r') as f:
content = f.read()
content = content.replace('__ARCADIA_SOURCE_ROOT_PREFIX__', source_root)
with open(dst_path, 'w') as f:
f.write(content)
def preprocess_args(args):
# Temporary workaround for noauto
if args.cgo_srcs and len(args.cgo_srcs) > 0:
cgo_srcs_set = set(args.cgo_srcs)
args.srcs = [x for x in args.srcs if x not in cgo_srcs_set]
args.pkg_root = os.path.join(args.toolchain_root, 'pkg')
toolchain_tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
args.go_compile = os.path.join(toolchain_tool_root, 'compile')
args.go_cgo = os.path.join(toolchain_tool_root, 'cgo')
args.go_link = os.path.join(toolchain_tool_root, 'link')
args.go_asm = os.path.join(toolchain_tool_root, 'asm')
args.go_pack = os.path.join(toolchain_tool_root, 'pack')
args.go_vet = os.path.join(toolchain_tool_root, 'vet') if args.vet is True else args.vet
args.output = os.path.normpath(args.output)
args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
args.trimpath = None
if args.debug_root_map:
roots = {'build': args.build_root, 'source': args.source_root, 'tools': args.tools_root}
replaces = []
for root in args.debug_root_map.split(';'):
src, dst = root.split('=', 1)
assert src in roots
replaces.append('{}=>{}'.format(roots[src], dst))
del roots[src]
assert len(replaces) > 0
args.trimpath = ';'.join(replaces)
args.build_root = os.path.normpath(args.build_root)
args.build_root_dir = args.build_root + os.path.sep
args.source_root = os.path.normpath(args.source_root)
args.source_root_dir = args.source_root + os.path.sep
args.output_root = os.path.normpath(args.output_root)
args.import_map = {}
args.module_map = {}
if args.cgo_peers:
args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]
assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
# sort go sources lexically by basename
args.srcs.sort(key=lambda x: os.path.basename(x))
if args.test_srcs:
args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
del args.test_srcs
if args.xtest_srcs:
args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
# compute root relative module dir path
assert args.output is None or args.output_root == os.path.dirname(args.output)
assert args.output_root.startswith(args.build_root_dir)
args.module_path = args.output_root[len(args.build_root_dir):]
assert len(args.module_path) > 0
args.import_path, args.is_std = get_import_path(args.module_path)
assert args.asmhdr is None or args.word == 'go'
srcs = []
for f in args.srcs:
if f.endswith(FIXED_CGO1_SUFFIX) and f.startswith(args.build_root_dir):
path = os.path.join(args.output_root, '{}.cgo1.go'.format(os.path.basename(f[:-len(FIXED_CGO1_SUFFIX)])))
srcs.append(path)
preprocess_cgo1(f, path, args.source_root)
else:
srcs.append(f)
args.srcs = srcs
classify_srcs(args.srcs, args)
def compare_versions(version1, version2):
v1 = tuple(str(int(x)).zfill(8) for x in version1.split('.'))
v2 = tuple(str(int(x)).zfill(8) for x in version2.split('.'))
if v1 == v2:
return 0
return 1 if v1 < v2 else -1
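# Illustrative note (not part of the original tool): compare_versions zero-pads
# every numeric component to 8 digits so that string comparison matches numeric
# ordering ('1.9' does not sort above '1.12'). The return convention is that
# 1 means version1 is older than version2, -1 means it is newer, 0 means equal:
#   compare_versions('1.12', '1.12') == 0
#   compare_versions('1.9', '1.12') == 1     # 1.9 is older than 1.12
#   compare_versions('1.13', '1.12') == -1   # 1.13 is newer than 1.12
# which is why call sites below use `compare_versions('1.12', args.goversion) >= 0`
# to test "the toolchain is go1.12 or newer".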
def get_symlink_or_copyfile():
os_symlink = getattr(os, 'symlink', None)
if os_symlink is None:
os_symlink = shutil.copyfile
return os_symlink
def copy_args(args):
return copy.copy(args)
def get_vendor_index(import_path):
index = import_path.rfind('/' + vendor_prefix)
if index < 0:
index = 0 if import_path.startswith(vendor_prefix) else index
else:
index = index + 1
return index
def get_import_path(module_path):
assert len(module_path) > 0
import_path = module_path.replace('\\', '/')
is_std_module = import_path.startswith(std_lib_prefix)
if is_std_module:
import_path = import_path[len(std_lib_prefix):]
elif import_path.startswith(vendor_prefix):
import_path = import_path[len(vendor_prefix):]
else:
import_path = arc_project_prefix + import_path
assert len(import_path) > 0
return import_path, is_std_module
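# Illustrative note (not part of the original tool): get_import_path maps a
# build-tree module path to the import path seen by the go toolchain, using the
# prefixes defined at the top of this file. Assumed examples:
#   get_import_path('contrib/go/_std/src/net/http') -> ('net/http', True)
#   get_import_path('vendor/golang.org/x/net/html') -> ('golang.org/x/net/html', False)
#   get_import_path('library/go/core/log') -> ('a.yandex-team.ru/library/go/core/log', False)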
def call(cmd, cwd, env=None):
# sys.stderr.write('{}\n'.format(' '.join(cmd)))
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def classify_srcs(srcs, args):
args.go_srcs = [x for x in srcs if x.endswith('.go')]
args.asm_srcs = [x for x in srcs if x.endswith('.s')]
args.objects = [x for x in srcs if x.endswith('.o') or x.endswith('.obj')]
args.symabis = [x for x in srcs if x.endswith('.symabis')]
args.sysos = [x for x in srcs if x.endswith('.syso')]
def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
info = {'importmap': [], 'packagefile': [], 'standard': {}}
if gen_importmap:
for key, value in six.iteritems(import_map):
info['importmap'].append((key, value))
for peer in peers:
peer_import_path, is_std = get_import_path(os.path.dirname(peer))
if gen_importmap:
index = get_vendor_index(peer_import_path)
if index >= 0:
index += len(vendor_prefix)
info['importmap'].append((peer_import_path[index:], peer_import_path))
info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
if is_std:
info['standard'][peer_import_path] = True
for key, value in six.iteritems(module_map):
info['packagefile'].append((key, value))
return info
def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
lines = []
info = get_import_config_info(peers, gen_importmap, import_map, module_map)
for key in ('importmap', 'packagefile'):
for item in info[key]:
lines.append('{} {}={}'.format(key, *item))
if len(lines) > 0:
lines.append('')
content = '\n'.join(lines)
# sys.stderr.writelines('{}\n'.format(l) for l in lines)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(content)
return f.name
return None
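# Illustrative note (not part of the original tool): the temporary file written
# above uses the plain-text format accepted by `go tool compile -importcfg` and
# `go tool link -importcfg`, one directive per line:
#   importmap <alias-import-path>=<real-import-path>
#   packagefile <import-path>=<path-to-package-archive>
# The actual entries are produced by get_import_config_info() from the peers,
# import_map and module_map of the current build node.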
def vet_info_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_info_ext)
def vet_report_output_name(path, ext=None):
return '{}{}'.format(path, ext or vet_report_ext)
def get_source_path(args):
return args.test_import_path or args.module_path
def gen_vet_info(args):
import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
import_map = dict(info['importmap'])
# FIXME(snermolaev): it seems that adding an import map entry for the 'fake' package
# doesn't do any harm (this needs to be revised later)
import_map['unsafe'] = 'unsafe'
for (key, _) in info['packagefile']:
if key not in import_map:
import_map[key] = key
data = {
'ID': import_path,
'Compiler': 'gc',
'Dir': os.path.join(args.source_root, get_source_path(args)),
'ImportPath': import_path,
'GoFiles': [x for x in args.go_srcs if x.endswith('.go')],
'NonGoFiles': [x for x in args.go_srcs if not x.endswith('.go')],
'ImportMap': import_map,
'PackageFile': dict(info['packagefile']),
'Standard': dict(info['standard']),
'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
'VetxOnly': False,
'VetxOutput': vet_info_output_name(args.output),
'SucceedOnTypecheckFailure': False
}
# sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
return data
def create_vet_config(args, info):
with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
f.write(json.dumps(info))
return f.name
def decode_vet_report(json_report):
report = ''
if json_report:
try:
full_diags = json.JSONDecoder(encoding='UTF-8').decode(json_report)
except ValueError:
report = json_report
else:
messages = []
for _, module_diags in six.iteritems(full_diags):
for _, type_diags in six.iteritems(module_diags):
for diag in type_diags:
messages.append(u'{}: {}'.format(diag['posn'], diag['message']))
report = '\n'.join(sorted(messages)).encode('UTF-8')
return report
def dump_vet_report(args, report):
if report:
report = report.replace(args.build_root, '$B')
report = report.replace(args.source_root, '$S')
with open(args.vet_report_output, 'w') as f:
f.write(report)
def read_vet_report(args):
assert args
report = ''
if os.path.exists(args.vet_report_output):
with open(args.vet_report_output, 'r') as f:
report += f.read()
return report
def dump_vet_report_for_tests(args, *test_args_list):
dump_vet_report(args, reduce(lambda x, y: x + read_vet_report(y), [_f for _f in test_args_list if _f], ''))
def do_vet(args):
assert args.vet
info = gen_vet_info(args)
vet_config = create_vet_config(args, info)
cmd = [args.go_vet, '-json']
if args.vet_flags:
cmd.extend(args.vet_flags)
cmd.append(vet_config)
# sys.stderr.write('>>>> [{}]\n'.format(' '.join(cmd)))
p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.source_root)
vet_out, vet_err = p_vet.communicate()
report = decode_vet_report(vet_out) if vet_out else ''
dump_vet_report(args, report)
if p_vet.returncode:
raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
import_path, is_std_module = args.import_path, args.is_std
cmd = [
args.go_compile,
'-o',
args.output,
'-p',
import_path,
'-D',
'""',
'-goversion',
'go{}'.format(args.goversion)
]
cmd.extend(get_trimpath_args(args))
if is_std_module:
cmd.append('-std')
if import_path == 'runtime' or import_path.startswith('runtime/internal/'):
cmd.append('-+')
import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
else:
if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
pass
else:
cmd.append('-complete')
if args.asmhdr:
cmd += ['-asmhdr', args.asmhdr]
if compare_versions('1.12', args.goversion) >= 0:
if args.symabis:
cmd += ['-symabis'] + args.symabis
if compare_versions('1.13', args.goversion) >= 0:
pass
elif import_path in ('runtime', 'runtime/internal/atomic'):
cmd.append('-allabis')
compile_workers = '4'
if args.compile_flags:
if import_path == 'runtime' or import_path.startswith('runtime/'):
cmd.extend(x for x in args.compile_flags if x not in COMPILE_OPTIMIZATION_FLAGS)
else:
cmd.extend(args.compile_flags)
if any([x in ('-race', '-shared') for x in args.compile_flags]):
compile_workers = '1'
cmd += ['-pack', '-c={}'.format(compile_workers)]
cmd += args.go_srcs
call(cmd, args.build_root)
class VetThread(threading.Thread):
def __init__(self, target, args):
super(VetThread, self).__init__(target=target, args=args)
self.exc_info = None
def run(self):
try:
super(VetThread, self).run()
except:
self.exc_info = sys.exc_info()
def join_with_exception(self, reraise_exception):
self.join()
if reraise_exception and self.exc_info:
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
def do_compile_go(args):
raise_exception_from_vet = False
if args.vet:
run_vet = VetThread(target=do_vet, args=(args,))
run_vet.start()
try:
_do_compile_go(args)
raise_exception_from_vet = True
finally:
if args.vet:
run_vet.join_with_exception(raise_exception_from_vet)
def do_compile_asm(args):
assert(len(args.srcs) == 1 and len(args.asm_srcs) == 1)
cmd = [args.go_asm]
cmd += get_trimpath_args(args)
cmd += ['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')]
cmd += ['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output]
if args.asm_flags:
cmd += args.asm_flags
cmd += args.asm_srcs
call(cmd, args.build_root)
def do_link_lib(args):
if len(args.asm_srcs) > 0:
asmargs = copy_args(args)
asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
do_compile_go(asmargs)
for src in asmargs.asm_srcs:
asmargs.srcs = [src]
asmargs.asm_srcs = [src]
asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
do_compile_asm(asmargs)
args.objects.append(asmargs.output)
else:
do_compile_go(args)
if args.objects:
cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
call(cmd, args.build_root)
def do_link_exe(args):
assert args.extld is not None
assert args.non_local_peers is not None
compile_args = copy_args(args)
compile_args.output = os.path.join(args.output_root, 'main.a')
compile_args.real_import_path = compile_args.import_path
compile_args.import_path = 'main'
if args.vcs and os.path.isfile(compile_args.vcs):
build_info = os.path.join('library', 'go', 'core', 'buildinfo')
if any([x.startswith(build_info) for x in compile_args.peers]):
compile_args.go_srcs.append(compile_args.vcs)
do_link_lib(compile_args)
cmd = [args.go_link, '-o', args.output]
import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
if import_config_name:
cmd += ['-importcfg', import_config_name]
if args.link_flags:
cmd += args.link_flags
if args.mode in ('exe', 'test'):
cmd.append('-buildmode=exe')
elif args.mode == 'dll':
cmd.append('-buildmode=c-shared')
else:
assert False, 'Unexpected mode: {}'.format(args.mode)
cmd.append('-extld={}'.format(args.extld))
extldflags = []
if args.extldflags is not None:
filter_musl = bool
if args.musl:
cmd.append('-linkmode=external')
extldflags.append('-static')
filter_musl = lambda x: not x in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
extldflags += [x for x in args.extldflags if filter_musl(x)]
cgo_peers = []
if args.cgo_peers is not None and len(args.cgo_peers) > 0:
is_group = args.targ_os == 'linux'
if is_group:
cgo_peers.append('-Wl,--start-group')
cgo_peers.extend(os.path.join(args.build_root, x) for x in args.cgo_peers)
if is_group:
cgo_peers.append('-Wl,--end-group')
try:
index = extldflags.index('--cgo-peers')
extldflags = extldflags[:index] + cgo_peers + extldflags[index+1:]
except ValueError:
extldflags.extend(cgo_peers)
if len(extldflags) > 0:
cmd.append('-extldflags={}'.format(' '.join(extldflags)))
cmd.append(compile_args.output)
call(cmd, args.build_root)
def gen_cover_info(args):
lines = []
lines.extend([
"""
var (
coverCounters = make(map[string][]uint32)
coverBlocks = make(map[string][]testing.CoverBlock)
)
""",
'func init() {',
])
for var, file in (x.split(':') for x in args.cover_info):
lines.append(' coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
lines.extend([
'}',
"""
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
panic("coverage: mismatched sizes")
}
if coverCounters[fileName] != nil {
// Already registered.
return
}
coverCounters[fileName] = counter
block := make([]testing.CoverBlock, len(counter))
for i := range counter {
block[i] = testing.CoverBlock{
Line0: pos[3*i+0],
Col0: uint16(pos[3*i+2]),
Line1: pos[3*i+1],
Col1: uint16(pos[3*i+2]>>16),
Stmts: numStmts[i],
}
}
coverBlocks[fileName] = block
}
""",
])
return lines
def filter_out_skip_tests(tests, skip_tests):
skip_set = set()
star_skip_set = set()
for t in skip_tests:
work_set = star_skip_set if '*' in t else skip_set
work_set.add(t)
re_star_tests = None
if len(star_skip_set) > 0:
re_star_tests = re.compile(re.sub(r'(\*)+', r'.\1', '^({})$'.format('|'.join(star_skip_set))))
return [x for x in tests if not (x in skip_tests or re_star_tests and re_star_tests.match(x))]
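# Illustrative note (not part of the original tool): exact names in skip_tests
# are matched literally, while patterns containing '*' are combined into a
# single anchored regex with every '*' turned into '.*'. Assumed example:
#   filter_out_skip_tests(['TestFoo', 'TestBar', 'BenchmarkFoo'],
#                         ['TestBar', 'Benchmark*'])
# keeps only ['TestFoo'].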
def gen_test_main(args, test_lib_args, xtest_lib_args):
assert args and (test_lib_args or xtest_lib_args)
test_miner = args.test_miner
test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
is_cover = args.cover_info and len(args.cover_info) > 0
# Prepare GOPATH
# $BINDIR
# |- __go__
# |- src
# |- pkg
# |- ${TARGET_OS}_${TARGET_ARCH}
go_path_root = os.path.join(args.output_root, '__go__')
test_src_dir = os.path.join(go_path_root, 'src')
target_os_arch = '_'.join([args.targ_os, args.targ_arch])
test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
os.makedirs(test_pkg_dir)
my_env = os.environ.copy()
my_env['GOROOT'] = ''
my_env['GOPATH'] = go_path_root
my_env['GOARCH'] = args.targ_arch
my_env['GOOS'] = args.targ_os
tests = []
xtests = []
os_symlink = get_symlink_or_copyfile()
# Get the list of "internal" tests
if test_lib_args:
os.makedirs(os.path.join(test_src_dir, test_module_path))
os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
tests = [x for x in (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
tests = filter_out_skip_tests(tests, args.skip_tests)
test_main_found = '#TestMain' in tests
# Get the list of "external" tests
if xtest_lib_args:
xtest_module_path = xtest_lib_args.import_path
os.makedirs(os.path.join(test_src_dir, xtest_module_path))
os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
xtests = [x for x in (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
if args.skip_tests:
xtests = filter_out_skip_tests(xtests, args.skip_tests)
xtest_main_found = '#TestMain' in xtests
test_main_package = None
if test_main_found and xtest_main_found:
assert False, 'multiple definition of TestMain'
elif test_main_found:
test_main_package = '_test'
elif xtest_main_found:
test_main_package = '_xtest'
shutil.rmtree(go_path_root)
lines = ['package main', '', 'import (']
if test_main_package is None:
lines.append(' "os"')
lines.extend([' "testing"', ' "testing/internal/testdeps"'])
if len(tests) > 0:
lines.append(' _test "{}"'.format(test_module_path))
elif test_lib_args:
lines.append(' _ "{}"'.format(test_module_path))
if len(xtests) > 0:
lines.append(' _xtest "{}"'.format(xtest_module_path))
elif xtest_lib_args:
lines.append(' _ "{}"'.format(xtest_module_path))
if is_cover:
lines.append(' _cover0 "{}"'.format(test_module_path))
lines.extend([')', ''])
for kind in ['Test', 'Benchmark', 'Example']:
lines.append('var {}s = []testing.Internal{}{{'.format(kind.lower(), kind))
for test in [x for x in tests if x.startswith(kind)]:
lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
for test in [x for x in xtests if x.startswith(kind)]:
lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
lines.extend(['}', ''])
if is_cover:
lines.extend(gen_cover_info(args))
lines.append('func main() {')
if is_cover:
lines.extend([
' testing.RegisterCover(testing.Cover{',
' Mode: "set",',
' Counters: coverCounters,',
' Blocks: coverBlocks,',
' CoveredPackages: "",',
' })',
])
lines.extend([
' m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)',
'',
])
if test_main_package:
lines.append(' {}.TestMain(m)'.format(test_main_package))
else:
lines.append(' os.Exit(m.Run())')
lines.extend(['}', ''])
content = '\n'.join(lines)
# sys.stderr.write('{}\n'.format(content))
return content
def do_link_test(args):
assert args.srcs or args.xtest_srcs
assert args.test_miner is not None
test_module_path = get_source_path(args)
test_import_path, _ = get_import_path(test_module_path)
test_lib_args = copy_args(args) if args.srcs else None
xtest_lib_args = copy_args(args) if args.xtest_srcs else None
ydx_file_name = None
xtest_ydx_file_name = None
need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
if need_append_ydx:
def find_ydx_file_name(name, flags):
for i, elem in enumerate(flags):
if elem.endswith(name):
return (i, elem)
assert False, 'Unreachable code'
idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name
if test_lib_args:
test_lib_args.output = os.path.join(args.output_root, 'test.a')
test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
test_lib_args.module_path = test_module_path
test_lib_args.import_path = test_import_path
do_link_lib(test_lib_args)
if xtest_lib_args:
xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
xtest_lib_args.module_path = test_module_path + '_test'
xtest_lib_args.import_path = test_import_path + '_test'
if test_lib_args:
xtest_lib_args.module_map[test_import_path] = test_lib_args.output
need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
do_link_lib(xtest_lib_args)
if need_append_ydx:
with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
dst_file.write(src_file.read())
test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
test_main_name = os.path.join(args.output_root, '_test_main.go')
with open(test_main_name, "w") as f:
f.write(test_main_content)
test_args = copy_args(args)
test_args.srcs = [test_main_name]
if test_args.test_import_path is None:
# it seems that we could do this unconditionally, but this kind
# of mangling doesn't really look good, so we only do it
# for pure GO_TEST modules
test_args.module_path = test_args.module_path + '___test_main__'
test_args.import_path = test_args.import_path + '___test_main__'
classify_srcs(test_args.srcs, test_args)
if test_lib_args:
test_args.module_map[test_lib_args.import_path] = test_lib_args.output
if xtest_lib_args:
test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output
if args.vet:
dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
test_args.vet = False
do_link_exe(test_args)
if __name__ == '__main__':
# Support @response-file notation for windows to reduce cmd length
if sys.argv[1].startswith('@'):
with open(sys.argv[1][1:]) as afile:
args = afile.read().splitlines()
sys.argv[:] = [sys.argv[0]] + args + sys.argv[2:]
parser = argparse.ArgumentParser(prefix_chars='+')
parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
parser.add_argument('++srcs', nargs='*', required=True)
parser.add_argument('++cgo-srcs', nargs='*')
parser.add_argument('++test_srcs', nargs='*')
parser.add_argument('++xtest_srcs', nargs='*')
parser.add_argument('++cover_info', nargs='*')
parser.add_argument('++output', nargs='?', default=None)
parser.add_argument('++source-root', default=None)
parser.add_argument('++build-root', required=True)
parser.add_argument('++tools-root', default=None)
parser.add_argument('++output-root', required=True)
parser.add_argument('++toolchain-root', required=True)
parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++host-arch', choices=['amd64'], required=True)
parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
parser.add_argument('++targ-arch', choices=['amd64', 'x86'], required=True)
parser.add_argument('++peers', nargs='*')
parser.add_argument('++non-local-peers', nargs='*')
parser.add_argument('++cgo-peers', nargs='*')
parser.add_argument('++asmhdr', nargs='?', default=None)
parser.add_argument('++test-import-path', nargs='?')
parser.add_argument('++test-miner', nargs='?')
parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
parser.add_argument('++extld', nargs='?', default=None)
parser.add_argument('++extldflags', nargs='+', default=None)
parser.add_argument('++goversion', required=True)
parser.add_argument('++asm-flags', nargs='*')
parser.add_argument('++compile-flags', nargs='*')
parser.add_argument('++link-flags', nargs='*')
parser.add_argument('++vcs', nargs='?', default=None)
parser.add_argument('++vet', nargs='?', const=True, default=False)
parser.add_argument('++vet-flags', nargs='*', default=None)
parser.add_argument('++vet-info-ext', default=vet_info_ext)
parser.add_argument('++vet-report-ext', default=vet_report_ext)
parser.add_argument('++musl', action='store_true')
parser.add_argument('++skip-tests', nargs='*', default=None)
parser.add_argument('++ydx-file', default='')
parser.add_argument('++debug-root-map', default=None)
args = parser.parse_args()
arc_project_prefix = args.arc_project_prefix
std_lib_prefix = args.std_lib_prefix
vet_info_ext = args.vet_info_ext
vet_report_ext = args.vet_report_ext
preprocess_args(args)
try:
os.unlink(args.output)
except OSError:
pass
# Only the 'lib', 'exe', 'dll' and 'test' build modes are supported currently,
# and as a result we generate only one build node per module
# (or program)
dispatch = {
'exe': do_link_exe,
'dll': do_link_exe,
'lib': do_link_lib,
'test': do_link_test
}
exit_code = 1
try:
dispatch[args.mode](args)
exit_code = 0
except KeyError:
sys.stderr.write('Unknown build mode [{}]...\n'.format(args.mode))
except subprocess.CalledProcessError as e:
sys.stderr.write('{} returned non-zero exit code {}.\n{}\n'.format(' '.join(e.cmd), e.returncode, e.output))
exit_code = e.returncode
except Exception as e:
sys.stderr.write('Unhandled exception [{}]...\n'.format(str(e)))
sys.exit(exit_code)
|
ppo.py
|
import numpy as np
import tensorflow as tf
# import tensorflow_probability as tfp
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
# ============================================================================================#
# Utilities
# ============================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR HW2 CODE HERE
with tf.variable_scope(scope):
x = input_placeholder
for i in range(n_layers):
x = tf.layers.dense(x, units=size, activation=activation)
output_placeholder = tf.layers.dense(x, units=output_size, activation=output_activation)
return output_placeholder
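# Illustrative usage sketch (assumed, not part of the original handout):
# build_mlp only wires up graph nodes; below it is called once for the policy
# (scopes "nn_actor" / "policy") and once for the critic (scope "nn_critic"),
# e.g. build_mlp(sy_ob_no, ac_dim, "nn_actor", n_layers=2, size=64) produces a
# (batch_size, ac_dim) logits tensor after two tanh hidden layers of width 64.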
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_ppo)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# ============================================================================================#
# Actor Critic
# ============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args, seed):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
self.seed = seed
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may be needed when running on a GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() # pylint: disable=E1101
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in the actor critic
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR HW2 CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
sy_logprob_old_n = tf.placeholder(shape=[None], name="old_logprob", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n, sy_logprob_old_n
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# YOUR_HW2 CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "nn_actor", self.n_layers, self.size)
return sy_logits_na
else:
# YOUR_HW2 CODE_HERE
sy_mean = build_mlp(sy_ob_no, self.ac_dim, "policy", self.n_layers, self.size)
sy_logstd = tf.get_variable(name="sy_logstd", shape=[self.ac_dim], dtype=tf.float32)
return (sy_mean, sy_logstd)
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_HW2 CODE_HERE
# sy_log_a = tf.nn.log_softmax(sy_logits_na, 1)
# sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1, seed=self.seed), 1)
sy_sampled_ac = tf.squeeze(tf.random.categorical(sy_logits_na, 1, seed=self.seed), 1)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_HW2 CODE_HERE
z = tf.random_normal(shape=tf.shape(sy_mean), dtype=tf.float32)
sigma = tf.exp(sy_logstd)
sy_sampled_ac = sy_mean + sigma * z
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_HW2 CODE_HERE
# ac_onehot = tf.one_hot(sy_ac_na, self.ac_dim)
# sy_logits_softmax = tf.nn.log_softmax(sy_logits_na)
# sy_logprob_n = tf.reduce_sum(sy_logits_softmax*ac_onehot, axis=1)
sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_HW2 CODE_HERE
pre_sum = -0.5 * (((sy_ac_na - sy_mean) / (tf.exp(sy_logstd))) ** 2 + 2 * sy_logstd + np.log(2 * np.pi))
sy_logprob_n = tf.reduce_sum(pre_sum, axis=1)
return sy_logprob_n
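# Illustrative note (assumed, not part of the original handout): the continuous
# branch above is the log-density of a diagonal Gaussian, summed over action
# dimensions:
#   log N(a | mu, sigma) = -0.5 * sum_i [ ((a_i - mu_i) / sigma_i)^2
#                                         + 2 * log(sigma_i) + log(2 * pi) ]
# with sigma_i = exp(sy_logstd_i), which is exactly the pre_sum expression.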
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.clip_ratio = 0.2
self.train_pi_iters = 80
self.train_v_iters = 80
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n, self.sy_logprob_old_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
self.ratio = tf.exp(self.sy_logprob_n - self.sy_logprob_old_n)
self.min_adv = tf.where(self.sy_adv_n>0, (1+self.clip_ratio)*self.sy_adv_n, (1-self.clip_ratio)*self.sy_adv_n)
self.actor_loss = -tf.reduce_mean(tf.minimum(self.ratio*self.sy_adv_n, self.min_adv))
self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.actor_loss)
# define the critic
self.critic_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_critic",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
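# Illustrative note (assumed, not part of the original handout): actor_loss
# above is the negative of the PPO clipped surrogate objective
#   L = E[ min( r(theta) * A, clip(r(theta), 1 - eps, 1 + eps) * A ) ]
# with r(theta) = exp(logpi_new - logpi_old) and eps = self.clip_ratio.
# Since A > 0 makes the clipped term equal (1 + eps) * A and A < 0 makes it
# (1 - eps) * A, the tf.where construction of self.min_adv is equivalent to
# clipping the ratio before multiplying by the advantage.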
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals, logps = [], [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: [ob]}) # YOUR HW2 CODE HERE
logp = self.sess.run(self.sy_logprob_n, feed_dict={self.sy_ob_no: [ob], self.sy_ac_na: ac})[0]
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
logps.append(logp)
next_obs.append(ob)
rewards.append(rew)
steps += 1
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
# YOUR CODE HERE
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation": np.array(obs, dtype=np.float32),
"reward": np.array(rewards, dtype=np.float32),
"action": np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32),
"logp": np.array(logps)}
return path
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
# First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')
# To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)
# This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),
# and V(s) when subtracting the baseline
# Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)
# otherwise the values will grow without bound.
# YOUR CODE HERE
vs_ = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
vs = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no})
adv_n = re_n + self.gamma * (1 - terminal_n) * vs_ - vs
if self.normalize_advantages:
adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)
adv_n = 0. + (1. + 1e-8) * adv_n # YOUR_HW2 CODE_HERE
return adv_n
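# Illustrative example (assumed numbers, not from the original handout): with
# gamma = 0.99, a transition with r = 1.0, V(s) = 2.0, V(s') = 3.0 and
# terminal = 0 gives A = 1.0 + 0.99 * 3.0 - 2.0 = 1.97, while terminal = 1
# gives A = 1.0 - 2.0 = -1.0, because the V(s') bootstrap is cut off at
# episode boundaries.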
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
nothing
"""
# Use bootstrapped target values to update the critic
# Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
# In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
# Every self.num_grad_steps_per_target_update steps, recompute the target values
# by evaluating V(s') on the updated critic
# Note: don't forget to use terminal_n to cut off the V(s') term when computing the target
# otherwise the values will grow without bound.
# YOUR CODE HERE
for i in range(self.num_target_updates):
vs_ = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: next_ob_no})
target = re_n + self.gamma * (1 - terminal_n) * vs_
for j in range(self.num_grad_steps_per_target_update):
closs, _ = self.sess.run([self.critic_loss, self.critic_update_op],
feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target})
return closs
def update_actor(self, ob_no, ac_na, adv_n, logp):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
for i in range(self.train_pi_iters):
aloss, _ = self.sess.run([self.actor_loss, self.actor_update_op],
feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n, self.sy_logprob_old_n: logp})
return aloss
def train_ppo(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
n_layers,
size):
start = time.time()
# ========================================================================================#
# Set Up Logger
# ========================================================================================#
setup_logger(logdir, locals())
# ========================================================================================#
# Set Up Env
# ========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
# ========================================================================================#
# Initialize Agent
# ========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args, seed) # estimate_return_args
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
# ========================================================================================#
# Training Loop
# ========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************" % itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
logp = np.concatenate([path["logp"] for path in paths])
# Call tensorflow operations to:
# (1) update the critic, by calling agent.update_critic
# (2) use the updated critic to compute the advantage, by calling agent.estimate_advantage
# (3) use the estimated advantage values to update the actor, by calling agent.update_actor
# YOUR CODE HERE
closs = agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
adv = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
aloss = agent.update_actor(ob_no, ac_na, adv, logp)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.log_tabular("closs", closs)
logz.log_tabular("aloss", aloss)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='ppo')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'ac_' + args.exp_name + '_' + args.env_name
logdir = os.path.join(data_path, logdir)
if not (os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10 * e
print('Running experiment with seed %d' % seed)
def train_func():
train_ppo(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir, '%d' % seed),
normalize_advantages=not (args.dont_normalize_advantages),
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_ppo in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you uncomment the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
commands.py
|
# These are the Mailpile commands, the public "API" we expose for searching,
# tagging and editing e-mail.
#
import copy
import datetime
import json
import os
import os.path
import random
import re
import shlex
import socket
import subprocess
import sys
import traceback
import threading
import time
import webbrowser
import mailpile.util
import mailpile.ui
import mailpile.postinglist
from mailpile.crypto.gpgi import GnuPG
from mailpile.eventlog import Event
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import IsMailbox
from mailpile.mailutils import AddressHeaderParser, ClearParseCache
from mailpile.mailutils import ExtractEmails, ExtractEmailAndName, Email
from mailpile.postinglist import GlobalPostingList
from mailpile.safe_popen import MakePopenUnsafe, MakePopenSafe
from mailpile.search import MailIndex
from mailpile.util import *
from mailpile.vcard import AddressInfo
class Command(object):
"""Generic command object all others inherit from"""
SYNOPSIS = (None, # CLI shortcode, e.g. A:
None, # CLI shortname, e.g. add
None, # API endpoint, e.g. sys/addmailbox
None) # Positional argument list
SYNOPSIS_ARGS = None # New-style positional argument list
API_VERSION = None
UI_CONTEXT = None
IS_USER_ACTIVITY = False
IS_HANGING_ACTIVITY = False
IS_INTERACTIVE = False
CONFIG_REQUIRED = True
COMMAND_CACHE_TTL = 0 # < 1 = Not cached
CHANGES_SESSION_CONTEXT = False
FAILURE = 'Failed: %(name)s %(args)s'
ORDER = (None, 0)
SPLIT_ARG = True # Uses shlex by default
RAISES = (UsageError, UrlRedirectException)
WITH_CONTEXT = ()
# Event logging settings
LOG_NOTHING = False
LOG_ARGUMENTS = True
LOG_PROGRESS = False
LOG_STARTING = '%(name)s: Starting'
LOG_FINISHED = '%(name)s: %(message)s'
# HTTP settings (note: security!)
HTTP_CALLABLE = ('GET', )
HTTP_POST_VARS = {}
HTTP_QUERY_VARS = {}
HTTP_BANNED_VARS = {}
HTTP_STRICT_VARS = True
HTTP_AUTH_REQUIRED = True
class CommandResult:
def __init__(self, command_obj, session,
command_name, doc, result, status, message,
template_id=None, kwargs={}, error_info={}):
self.session = session
self.command_obj = command_obj
self.command_name = command_name
self.kwargs = {}
self.kwargs.update(kwargs)
self.template_id = template_id
self.doc = doc
self.result = result
self.status = status
self.error_info = {}
self.error_info.update(error_info)
self.message = message
self.rendered = {}
self.renderers = {
'json': self.as_json,
'html': self.as_html,
'text': self.as_text,
'css': self.as_css,
'rss': self.as_rss,
'xml': self.as_xml,
'txt': self.as_txt,
'js': self.as_js
}
def __nonzero__(self):
return (self.result and True or False)
def as_(self, what, *args, **kwargs):
if args or kwargs:
# Args render things un-cacheable.
return self.renderers.get(what)(*args, **kwargs)
if what not in self.rendered:
self.rendered[what] = self.renderers.get(what, self.as_text)()
return self.rendered[what]
def as_text(self):
if isinstance(self.result, bool):
happy = '%s: %s' % (self.result and _('OK') or _('Failed'),
self.message or self.doc)
if not self.result and self.error_info:
return '%s\n%s' % (happy,
json.dumps(self.error_info, indent=4,
default=mailpile.util.json_helper))
else:
return happy
elif isinstance(self.result, (dict, list, tuple)):
return json.dumps(self.result, indent=4, sort_keys=True,
default=mailpile.util.json_helper)
else:
return unicode(self.result)
__str__ = lambda self: self.as_text()
__unicode__ = lambda self: self.as_text()
def as_dict(self):
from mailpile.urlmap import UrlMap
rv = {
'command': self.command_name,
'state': {
'command_url': UrlMap.ui_url(self.command_obj),
'context_url': UrlMap.context_url(self.command_obj),
'query_args': self.command_obj.state_as_query_args(),
'cache_id': self.command_obj.cache_id(),
'context': self.command_obj.context or ''
},
'status': self.status,
'message': self.message,
'result': self.result,
'event_id': self.command_obj.event.event_id,
'elapsed': '%.3f' % self.session.ui.time_elapsed,
}
if self.error_info:
rv['error'] = self.error_info
for ui_key in [k for k in self.kwargs.keys()
if k.startswith('ui_')]:
rv[ui_key] = self.kwargs[ui_key]
return rv
def as_json(self):
return self.session.ui.render_json(self.as_dict())
def as_html(self, template=None):
return self.as_template('html', template)
def as_js(self, template=None):
return self.as_template('js', template)
def as_css(self, template=None):
return self.as_template('css', template)
def as_rss(self, template=None):
return self.as_template('rss', template)
def as_xml(self, template=None):
return self.as_template('xml', template)
def as_txt(self, template=None):
return self.as_template('txt', template)
def as_template(self, etype, template=None):
what = ''.join((etype, '/' if template else '', template or ''))
for e in ('jhtml', 'jjs', 'jcss', 'jxml', 'jrss'):
if self.session.ui.render_mode.endswith(e):
what += ':content'
if what in self.rendered:
return self.rendered[what]
tpath = self.command_obj.template_path(
etype, template_id=self.template_id, template=template)
data = self.as_dict()
data['title'] = self.message
def render():
return self.session.ui.render_web(
self.session.config, [tpath], data)
if what.endswith(':content'):
data['render_mode'] = 'content'
data['result'] = render()
self.rendered[what] = self.session.ui.render_json(data)
else:
data['render_mode'] = 'full'
self.rendered[what] = render()
return self.rendered[what]
def __init__(self, session, name=None, arg=None, data=None, async=False):
self.session = session
self.context = None
self.name = self.SYNOPSIS[1] or self.SYNOPSIS[2] or name
self.data = data or {}
self.status = 'unknown'
self.message = name
self.error_info = {}
self.result = None
self.run_async = async
if type(arg) in (type(list()), type(tuple())):
self.args = tuple(arg)
elif arg:
if self.SPLIT_ARG is True:
try:
self.args = tuple([a.decode('utf-8') for a in
shlex.split(arg.encode('utf-8'))])
except (ValueError, UnicodeEncodeError, UnicodeDecodeError):
raise UsageError(_('Failed to parse arguments'))
else:
self.args = (arg, )
else:
self.args = tuple([])
if 'arg' in self.data:
self.args = tuple(list(self.args) + self.data['arg'])
self._create_event()
def state_as_query_args(self):
args = {}
if self.args:
args['arg'] = self._sloppy_copy(self.args)
args.update(self._sloppy_copy(self.data))
return args
def cache_id(self, sqa=None):
if self.COMMAND_CACHE_TTL < 1:
return ''
from mailpile.urlmap import UrlMap
args = sorted(list((sqa or self.state_as_query_args()).iteritems()))
# The replace() stuff makes these usable as CSS class IDs
return ('%s-%s' % (UrlMap.ui_url(self), md5_hex(str(args)))
).replace('/', '-').replace('.', '-')
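# Illustrative only: the returned ID has the shape '<ui-url>-<md5-of-args>'
# with '/' and '.' rewritten to '-', e.g. (hypothetical) '-search--d41d8cd9...',
# which is what makes it safe to reuse as a CSS class name.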
def cache_requirements(self, result):
raise NotImplementedError('Cachable commands should override this, '
'returning a set() of requirements.')
def cache_result(self, result):
if self.COMMAND_CACHE_TTL > 0:
cache_id = self.cache_id()
if cache_id:
self.session.config.command_cache.cache_result(
cache_id,
time.time() + self.COMMAND_CACHE_TTL,
self.cache_requirements(result),
self,
result)
self.session.ui.mark(_('Cached result as %s') % cache_id)
def template_path(self, etype, template_id=None, template=None):
path_parts = (template_id or self.SYNOPSIS[2] or 'command').split('/')
if len(path_parts) == 1:
path_parts.append('index')
if template not in (None, etype, 'as.' + etype):
# Security: The template request may come from the URL, so we
# sanitize it very aggressively before heading off
# to the filesystem.
clean_tpl = CleanText(template.replace('.%s' % etype, ''),
banned=(CleanText.FS +
CleanText.WHITESPACE))
path_parts[-1] += '-%s' % clean_tpl
path_parts[-1] += '.' + etype
return os.path.join(*path_parts)
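# Illustrative (assumed values): template_path('html', template_id='search')
# resolves to 'search/index.html', while passing template='mobile.html' would
# yield 'search/index-mobile.html' after the sanitization above.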
def _gnupg(self):
return GnuPG(self.session.config)
def _config(self):
session, config = self.session, self.session.config
if not config.loaded_config:
config.load(session)
parent = session
config.prepare_workers(session, daemons=self.IS_INTERACTIVE)
if self.IS_INTERACTIVE and not config.daemons_started():
config.prepare_workers(session, daemons=True)
return config
def _idx(self, reset=False, wait=True, wait_all=True, quiet=False):
session, config = self.session, self._config()
if not reset and config.index:
return config.index
def __do_load2():
config.vcards.load_vcards(session)
if not wait_all:
session.ui.report_marks(quiet=quiet)
def __do_load1():
if reset:
config.index = None
session.results = []
session.searched = []
session.displayed = None
idx = config.get_index(session)
if wait_all:
__do_load2()
if not wait:
session.ui.report_marks(quiet=quiet)
return idx
if wait:
rv = config.save_worker.do(session, 'Load', __do_load1)
session.ui.reset_marks(quiet=quiet)
else:
config.save_worker.add_task(session, 'Load', __do_load1)
rv = None
if not wait_all:
config.save_worker.add_task(session, 'Load2', __do_load2)
return rv
def _background_save(self,
everything=False, config=False,
index=False, index_full=False,
wait=False, wait_callback=None):
session, cfg = self.session, self.session.config
aut = cfg.save_worker.add_unique_task
if everything or config:
aut(session, 'Save config', cfg.save)
if cfg.index:
cfg.flush_mbox_cache(session, clear=False, wait=wait)
if index_full:
aut(session, 'Save index', lambda: self._idx().save(session))
elif everything or index:
aut(session, 'Save index changes',
lambda: self._idx().save_changes(session))
if wait:
wait_callback = wait_callback or (lambda: True)
cfg.save_worker.do(session, 'Waiting', wait_callback)
def _choose_messages(self, words, allow_ephemeral=False):
msg_ids = set()
all_words = []
for word in words:
all_words.extend(word.split(','))
for what in all_words:
if what.lower() == 'these':
if self.session.displayed:
b = self.session.displayed['stats']['start'] - 1
c = self.session.displayed['stats']['count']
msg_ids |= set(self.session.results[b:b + c])
else:
self.session.ui.warning(_('No results to choose from!'))
elif what.lower() == 'all':
if self.session.results:
msg_ids |= set(self.session.results)
else:
self.session.ui.warning(_('No results to choose from!'))
elif what.startswith('='):
try:
msg_id = int(what[1:], 36)
if msg_id >= 0 and msg_id < len(self._idx().INDEX):
msg_ids.add(msg_id)
else:
self.session.ui.warning((_('No such ID: %s')
) % (what[1:], ))
except ValueError:
if allow_ephemeral and '-' in what:
msg_ids.add(what[1:])
else:
self.session.ui.warning(_('What message is %s?'
) % (what, ))
elif '-' in what:
try:
b, e = what.split('-')
msg_ids |= set(self.session.results[int(b) - 1:int(e)])
except (ValueError, KeyError, IndexError, TypeError):
self.session.ui.warning(_('What message is %s?'
) % (what, ))
else:
try:
msg_ids.add(self.session.results[int(what) - 1])
except (ValueError, KeyError, IndexError, TypeError):
self.session.ui.warning(_('What message is %s?'
) % (what, ))
return msg_ids
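# Illustrative selectors handled above: 'all' (every current result),
# 'these' (the messages currently displayed), '=1a' (a base-36 message ID),
# '1-5' (a range of result positions), '3' (a single position), or any
# comma-separated combination of these.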
def _error(self, message, info=None, result=None):
self.status = 'error'
self.message = message
ui_message = _('%s error: %s') % (self.name, message)
if info:
self.error_info.update(info)
details = ' '.join(['%s=%s' % (k, info[k]) for k in info])
ui_message += ' (%s)' % details
self.session.ui.mark(self.name)
self.session.ui.error(ui_message)
if result:
return self.view(result)
else:
return False
def _success(self, message, result=True):
self.status = 'success'
self.message = message
ui_message = '%s: %s' % (self.name, message)
self.session.ui.mark(ui_message)
return self.view(result)
def _read_file_or_data(self, fn):
if fn in self.data:
return self.data[fn]
else:
return open(fn, 'rb').read()
def _ignore_exception(self):
self.session.ui.debug(traceback.format_exc())
def _serialize(self, name, function):
return function()
def _background(self, name, function):
session, config = self.session, self.session.config
return config.slow_worker.add_task(session, name, function)
def _update_event_state(self, state, log=False):
self.event.flags = state
self.event.data['elapsed'] = int(1000 * (time.time()-self._start_time))
if (log or self.LOG_PROGRESS) and not self.LOG_NOTHING:
ui = str(self.session.ui.__class__).replace('mailpile.', '.')
self.event.data['ui'] = ui
self.event.data['output'] = self.session.ui.render_mode
if self.session.config.event_log:
self.session.config.event_log.log_event(self.event)
def _starting(self):
self._start_time = time.time()
self._update_event_state(Event.RUNNING)
if self.name:
self.session.ui.start_command(self.name, self.args, self.data)
def _fmt_msg(self, message):
return message % {'name': self.name,
'status': self.status or '',
'message': self.message or ''}
def _sloppy_copy(self, data, name=None):
if name and 'pass' == name[:4]:
data = '(SUPPRESSED)'
def copy_value(v):
try:
unicode(v).encode('utf-8')
return unicode(v)[:1024]
except (UnicodeEncodeError, UnicodeDecodeError):
return '(BINARY DATA)'
if isinstance(data, (list, tuple)):
return [self._sloppy_copy(i, name=name) for i in data]
elif isinstance(data, dict):
return dict((k, self._sloppy_copy(v, name=k))
for k, v in data.iteritems())
else:
return copy_value(data)
def _create_event(self):
private_data = {}
if self.LOG_ARGUMENTS:
if self.data:
private_data['data'] = self._sloppy_copy(self.data)
if self.args:
private_data['args'] = self._sloppy_copy(self.args)
self.event = self._make_command_event(private_data)
def _make_command_event(self, private_data):
return Event(source=self,
message=self._fmt_msg(self.LOG_STARTING),
flags=Event.INCOMPLETE,
data={},
private_data=private_data)
def _finishing(self, command, rv, just_cleanup=False):
if just_cleanup:
self._update_finished_event()
return rv
if not self.context:
self.context = self.session.get_context(
update=self.CHANGES_SESSION_CONTEXT)
self.session.ui.mark(_('Generating result'))
result = self.CommandResult(self, self.session, self.name,
command.__doc__ or self.__doc__,
rv, self.status, self.message,
error_info=self.error_info)
self.cache_result(result)
if not self.run_async:
self._update_finished_event()
return result
def _update_finished_event(self):
# Update the event!
if self.message:
self.event.message = self.message
if self.error_info:
self.event.private_data['error_info'] = self.error_info
self.event.message = self._fmt_msg(self.LOG_FINISHED)
self._update_event_state(Event.COMPLETE, log=True)
self.session.ui.mark(self.event.message)
self.session.ui.report_marks(
details=('timing' in self.session.config.sys.debug))
if self.name:
self.session.ui.finish_command(self.name)
def _run_sync(self, enable_cache, *args, **kwargs):
self._starting()
self._run_args = args
self._run_kwargs = kwargs
if (self.COMMAND_CACHE_TTL > 0 and
'http' not in self.session.config.sys.debug and
enable_cache):
cid = self.cache_id()
try:
rv = self.session.config.command_cache.get_result(cid)
rv.session.ui = self.session.ui
if self.CHANGES_SESSION_CONTEXT:
self.session.copy(rv.session, ui=False)
self.session.ui.mark(_('Using pre-cached result object %s') % cid)
self._finishing(self, True, just_cleanup=True)
return rv
except:
pass
def command(self, *args, **kwargs):
if self.CONFIG_REQUIRED:
if not self.session.config.loaded_config:
return self._error(_('Please log in'))
if mailpile.util.QUITTING:
return self._error(_('Shutting down'))
return self.command(*args, **kwargs)
try:
return self._finishing(command, command(self, *args, **kwargs))
except self.RAISES:
self.status = 'success'
self._finishing(command, True, just_cleanup=True)
raise
except:
self._ignore_exception()
self._error(self.FAILURE % {'name': self.name,
'args': ' '.join(self.args)})
return self._finishing(command, False)
def _run(self, *args, **kwargs):
if self.run_async:
def streetcar():
try:
with MultiContext(self.WITH_CONTEXT):
rv = self._run_sync(True, *args, **kwargs).as_dict()
self.event.private_data.update(rv)
self._update_finished_event()
except:
traceback.print_exc()
self._starting()
self._update_event_state(self.event.RUNNING, log=True)
result = Command.CommandResult(self, self.session, self.name,
self.__doc__,
{"resultid": self.event.event_id},
"success",
"Running in background")
self.session.config.async_worker.add_task(self.session, self.name,
streetcar)
return result
else:
return self._run_sync(True, *args, **kwargs)
def run(self, *args, **kwargs):
with MultiContext(self.WITH_CONTEXT):
if self.IS_USER_ACTIVITY:
try:
mailpile.util.LAST_USER_ACTIVITY = time.time()
mailpile.util.LIVE_USER_ACTIVITIES += 1
return self._run(*args, **kwargs)
finally:
mailpile.util.LIVE_USER_ACTIVITIES -= 1
else:
return self._run(*args, **kwargs)
def refresh(self):
self._create_event()
return self._run_sync(False, *self._run_args, **self._run_kwargs)
def command(self):
return None
def etag_data(self):
return []
def max_age(self):
return 0
@classmethod
def view(cls, result):
return result
##[ Shared basic Search Result class]#########################################
class SearchResults(dict):
_NAME_TITLES = ('the', 'mr', 'ms', 'mrs', 'sir', 'dr', 'lord')
def _name(self, sender, short=True, full_email=False):
words = re.sub('["<>]', '', sender).split()
nomail = [w for w in words if not '@' in w]
if nomail:
if short:
if len(nomail) > 1 and nomail[0].lower() in self._NAME_TITLES:
return nomail[1]
return nomail[0]
return ' '.join(nomail)
elif words:
if not full_email:
return words[0].split('@', 1)[0]
return words[0]
return '(nobody)'
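# Illustrative (hypothetical input): _name('"Dr Jane Doe" <j@example.com>')
# returns 'Jane' (the title is skipped), short=False returns 'Dr Jane Doe',
# and a bare address falls back to its local part ('j').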
def _names(self, senders):
if len(senders) > 1:
names = {}
for sender in senders:
sname = self._name(sender)
names[sname] = names.get(sname, 0) + 1
namelist = names.keys()
namelist.sort(key=lambda n: -names[n])
return ', '.join(namelist)
if len(senders) < 1:
return '(no sender)'
if senders:
return self._name(senders[0], short=False)
return ''
def _compact(self, namelist, maxlen):
l = len(namelist)
while l > maxlen:
namelist = re.sub(', *[^, \.]+, *', ',,', namelist, 1)
if l == len(namelist):
break
l = len(namelist)
namelist = re.sub(',,,+, *', ' .. ', namelist, 1)
return namelist
TAG_TYPE_FLAG_MAP = {
'trash': 'trash',
'spam': 'spam',
'ham': 'ham',
'drafts': 'draft',
'blank': 'draft',
'sent': 'from_me',
'outbox': 'from_me',
'replied': 'replied',
'fwded': 'forwarded'
}
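# The map above turns tag *types* into the boolean flags exposed in the
# message metadata below; e.g. a tag of type 'sent' or 'outbox' sets
# flags['from_me'] = True on the message.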
def _metadata(self, msg_info):
import mailpile.urlmap
nz = lambda l: [v for v in l if v]
msg_ts = long(msg_info[MailIndex.MSG_DATE], 36)
msg_date = datetime.datetime.fromtimestamp(msg_ts)
fe, fn = ExtractEmailAndName(msg_info[MailIndex.MSG_FROM])
f_info = self._address(e=fe, n=fn)
f_info['aid'] = (self._msg_addresses(msg_info, no_to=True, no_cc=True)
or [''])[0]
expl = {
'mid': msg_info[MailIndex.MSG_MID],
'id': msg_info[MailIndex.MSG_ID],
'timestamp': msg_ts,
'from': f_info,
'to_aids': self._msg_addresses(msg_info, no_from=True, no_cc=True),
'cc_aids': self._msg_addresses(msg_info, no_from=True, no_to=True),
'msg_kb': int(msg_info[MailIndex.MSG_KB], 36),
'tag_tids': sorted(self._msg_tags(msg_info)),
'thread_mid': msg_info[MailIndex.MSG_THREAD_MID],
'subject': msg_info[MailIndex.MSG_SUBJECT],
'body': MailIndex.get_body(msg_info),
'flags': {
},
'crypto': {
}
}
# Ephemeral messages do not have URLs
if '-' in msg_info[MailIndex.MSG_MID]:
expl['flags'].update({
'ephemeral': True,
'draft': True,
})
else:
expl['urls'] = {
'thread': self.urlmap.url_thread(msg_info[MailIndex.MSG_MID]),
'source': self.urlmap.url_source(msg_info[MailIndex.MSG_MID]),
}
# Support rich snippets
if expl['body']['snippet'].startswith('{'):
try:
expl['body'] = json.loads(expl['body']['snippet'])
except ValueError:
pass
# Misc flags
sender_vcard = self.idx.config.vcards.get_vcard(fe.lower())
if sender_vcard:
if sender_vcard.kind == 'profile':
expl['flags']['from_me'] = True
tag_types = [self.idx.config.get_tag(t).type for t in expl['tag_tids']]
for t in self.TAG_TYPE_FLAG_MAP:
if t in tag_types:
expl['flags'][self.TAG_TYPE_FLAG_MAP[t]] = True
# Check tags for signs of encryption or signatures
tag_slugs = [self.idx.config.get_tag(t).slug for t in expl['tag_tids']]
for t in tag_slugs:
if t.startswith('mp_sig'):
expl['crypto']['signature'] = t[7:]
elif t.startswith('mp_enc'):
expl['crypto']['encryption'] = t[7:]
# Extra behavior for editable messages
if 'draft' in expl['flags']:
if 'ephemeral' in expl['flags']:
pass
elif self.idx.config.is_editable_message(msg_info):
expl['urls']['editing'] = self.urlmap.url_edit(expl['mid'])
else:
del expl['flags']['draft']
return expl
def _msg_addresses(self, msg_info=None, addresses=[],
no_from=False, no_to=False, no_cc=False):
cids = set()
for ai in addresses:
try:
cids.add(b36(self.idx.EMAIL_IDS[ai.address.lower()]))
except KeyError:
cids.add(b36(self.idx._add_email(ai.address, name=ai.fn)))
if msg_info:
if not no_to:
to = [t for t in msg_info[MailIndex.MSG_TO].split(',') if t]
cids |= set(to)
if not no_cc:
cc = [t for t in msg_info[MailIndex.MSG_CC].split(',') if t]
cids |= set(cc)
if not no_from:
fe, fn = ExtractEmailAndName(msg_info[MailIndex.MSG_FROM])
if fe:
try:
cids.add(b36(self.idx.EMAIL_IDS[fe.lower()]))
except KeyError:
cids.add(b36(self.idx._add_email(fe, name=fn)))
return sorted(list(cids))
def _address(self, cid=None, e=None, n=None):
if cid and not (e and n):
e, n = ExtractEmailAndName(self.idx.EMAILS[int(cid, 36)])
vcard = self.session.config.vcards.get_vcard(e)
if vcard and '@' in n:
n = vcard.fn
return AddressInfo(e, n, vcard=vcard)
def _msg_tags(self, msg_info):
tids = [t for t in msg_info[MailIndex.MSG_TAGS].split(',')
if t and t in self.session.config.tags]
return tids
def _tag(self, tid, attributes={}):
return dict_merge(self.session.config.get_tag_info(tid), attributes)
def _thread(self, thread_mid):
msg_info = self.idx.get_msg_at_idx_pos(int(thread_mid, 36))
thread = [i for i in msg_info[MailIndex.MSG_REPLIES].split(',') if i]
# FIXME: This is a hack, the indexer should just keep things
# in the right order on rescan. Fixing threading is a bigger
# problem though, so we do this for now.
def thread_sort_key(idx):
info = self.idx.get_msg_at_idx_pos(int(idx, 36))
return int(info[self.idx.MSG_DATE], 36)
thread.sort(key=thread_sort_key)
return thread
WANT_MSG_TREE = ('attachments', 'html_parts', 'text_parts', 'header_list',
'editing_strings', 'crypto')
PRUNE_MSG_TREE = ('headers', ) # Added by editing_strings
def _prune_msg_tree(self, tree):
for k in tree.keys():
if k not in self.WANT_MSG_TREE or k in self.PRUNE_MSG_TREE:
del tree[k]
return tree
def _message(self, email):
tree = email.get_message_tree(want=(email.WANT_MSG_TREE_PGP +
self.WANT_MSG_TREE))
email.evaluate_pgp(tree, decrypt=True)
editing_strings = tree.get('editing_strings')
if editing_strings:
for key in ('from', 'to', 'cc', 'bcc'):
if key in editing_strings:
cids = self._msg_addresses(
addresses=AddressHeaderParser(
unicode_data=editing_strings[key]))
editing_strings['%s_aids' % key] = cids
for cid in cids:
if cid not in self['data']['addresses']:
self['data']['addresses'
][cid] = self._address(cid=cid)
return self._prune_msg_tree(tree)
def __init__(self, session, idx,
results=None, start=0, end=None, num=None,
emails=None, people=None,
suppress_data=False, full_threads=True):
dict.__init__(self)
self.session = session
self.people = people
self.emails = emails
self.idx = idx
self.urlmap = mailpile.urlmap.UrlMap(self.session)
results = self.results = results or session.results or []
num = num or session.config.prefs.num_results
if end:
start = end - num
if start > len(results):
start = len(results)
if start < 0:
start = 0
try:
threads = [b36(r) for r in results[start:start + num]]
except TypeError:
results = threads = []
start = end = 0
self.session.ui.mark(_('Parsing metadata for %d results '
'(full_threads=%s)') % (len(threads),
full_threads))
self.update({
'summary': _('Search: %s') % ' '.join(session.searched),
'stats': {
'count': len(threads),
'start': start + 1,
'end': start + min(num, len(results)-start),
'total': len(results),
},
'search_terms': session.searched,
'address_ids': [],
'message_ids': [],
'thread_ids': threads,
})
if 'tags' in self.session.config:
search_tags = [idx.config.get_tag(t.split(':')[1], {})
for t in session.searched
if t.startswith('in:') or t.startswith('tag:')]
search_tag_ids = [t._key for t in search_tags if t]
self.update({
'search_tag_ids': search_tag_ids,
})
if search_tag_ids:
self['summary'] = ' & '.join([t.name for t
in search_tags if t])
else:
search_tag_ids = []
if suppress_data or (not results and not emails):
return
self.update({
'data': {
'addresses': {},
'metadata': {},
'messages': {},
'threads': {}
}
})
if 'tags' in self.session.config:
th = self['data']['tags'] = {}
for tid in search_tag_ids:
if tid not in th:
th[tid] = self._tag(tid, {'searched': True})
idxs = results[start:start + num]
while idxs:
idx_pos = idxs.pop(0)
msg_info = idx.get_msg_at_idx_pos(idx_pos)
self.add_msg_info(b36(idx_pos), msg_info,
full_threads=full_threads, idxs=idxs)
if emails and len(emails) == 1:
self['summary'] = emails[0].get_msg_info(MailIndex.MSG_SUBJECT)
for e in emails or []:
self.add_email(e)
def add_msg_info(self, mid, msg_info, full_threads=False, idxs=None):
# Populate data.metadata
self['data']['metadata'][mid] = self._metadata(msg_info)
# Populate data.thread
thread_mid = msg_info[self.idx.MSG_THREAD_MID]
if thread_mid not in self['data']['threads']:
thread = self._thread(thread_mid)
self['data']['threads'][thread_mid] = thread
if full_threads and idxs:
idxs.extend([int(t, 36) for t in thread
if t not in self['data']['metadata']])
# Populate data.person
for cid in self._msg_addresses(msg_info):
if cid not in self['data']['addresses']:
self['data']['addresses'][cid] = self._address(cid=cid)
# Populate data.tag
if 'tags' in self.session.config:
for tid in self._msg_tags(msg_info):
if tid not in self['data']['tags']:
self['data']['tags'][tid] = self._tag(tid,
{"searched": False})
def add_email(self, e):
if e not in self.emails:
self.emails.append(e)
mid = e.msg_mid()
if mid not in self['data']['messages']:
self['data']['messages'][mid] = self._message(e)
if mid not in self['message_ids']:
self['message_ids'].append(mid)
# This happens last, as the parsing above may have side-effects
# which matter once we get this far.
self.add_msg_info(mid, e.get_msg_info(uncached=True))
def __nonzero__(self):
return True
def next_set(self):
stats = self['stats']
return SearchResults(self.session, self.idx,
start=stats['start'] - 1 + stats['count'])
def previous_set(self):
stats = self['stats']
return SearchResults(self.session, self.idx,
end=stats['start'] - 1)
def as_text(self):
from mailpile.www.jinjaextensions import MailpileCommand as JE
clen = max(3, len('%d' % len(self.session.results)))
cfmt = '%%%d.%ds' % (clen, clen)
term_width = self.session.ui.term.max_width()
fs_width = int((22 + 53) * (term_width / 79.0))
f_width = min(32, int(0.30 * fs_width))
s_width = fs_width - f_width
text = []
count = self['stats']['start']
expand_ids = [e.msg_idx_pos for e in (self.emails or [])]
addresses = self.get('data', {}).get('addresses', {})
for mid in self['thread_ids']:
m = self['data']['metadata'][mid]
tags = [self['data']['tags'][t] for t in m['tag_tids']]
tag_names = [t['name'] for t in tags
if not t.get('searched', False)
and t.get('label', True)
and t.get('display', '') != 'invisible']
tag_new = [t for t in tags if t.get('type') == 'unread']
tag_names.sort()
msg_meta = tag_names and (' (' + '('.join(tag_names)) or ''
# FIXME: this is a bit ugly, but useful for development
es = ['', '']
for t in [t['slug'] for t in tags]:
if t.startswith('mp_enc') and 'none' not in t:
es[1] = 'E'
if t.startswith('mp_sig') and 'none' not in t:
es[0] = 'S'
es = ''.join([e for e in es if e])
if es:
msg_meta = (msg_meta or ' ') + ('[%s]' % es)
elif msg_meta:
msg_meta += ')'
else:
msg_meta += ' '
msg_meta += elapsed_datetime(m['timestamp'])
from_info = (m['from'].get('fn') or m['from'].get('email')
or '(anonymous)')
if from_info[:1] in ('<', '"', '\''):
from_info = from_info[1:]
if from_info[-1:] in ('>', '"', '\''):
from_info = from_info[:-1]
if '@' in from_info and len(from_info) > 18:
e, d = from_info.split('@', 1)
if d in ('gmail.com', 'yahoo.com', 'hotmail.com'):
from_info = '%s@%s..' % (e, d[0])
else:
from_info = '%s..@%s' % (e[0], d)
if not expand_ids:
def gg(pos):
return (pos < 10) and pos or '>'
thread = [m['thread_mid']]
thread += self['data']['threads'][m['thread_mid']]
if m['mid'] not in thread:
thread.append(m['mid'])
pos = thread.index(m['mid']) + 1
if pos > 1:
from_info = '%s>%s' % (gg(pos-1), from_info)
else:
from_info = ' ' + from_info
if pos < len(thread):
from_info = '%s>%s' % (from_info[:20], gg(len(thread)-pos))
subject = re.sub('^(\\[[^\\]]{6})[^\\]]{3,}\\]\\s*', '\\1..] ',
JE._nice_subject(m))
sfmt = '%%-%d.%ds%%s' % (max(1, s_width - (clen + len(msg_meta))),
max(1, s_width - (clen + len(msg_meta))))
ffmt = ' %%-%d.%ds %%s' % (f_width, f_width)
tfmt = cfmt + ffmt + sfmt
text.append(tfmt % (count, from_info, tag_new and '*' or ' ',
subject, msg_meta))
if mid in self['data'].get('messages', {}):
exp_email = self.emails[expand_ids.index(int(mid, 36))]
msg_tree = exp_email.get_message_tree()
text.append('-' * term_width)
text.append(exp_email.get_editing_string(msg_tree,
attachment_headers=False).strip())
if msg_tree['attachments']:
text.append('\nAttachments:')
for a in msg_tree['attachments']:
text.append('%5.5s %s' % ('#%s' % a['count'],
a['filename']))
text.append('-' * term_width)
count += 1
if not count:
text = ['(No messages found)']
return '\n'.join(text) + '\n'
##[ Internals ]###############################################################
class Load(Command):
"""Load or reload the metadata index"""
SYNOPSIS = (None, 'load', None, None)
ORDER = ('Internals', 1)
CONFIG_REQUIRED = False
IS_INTERACTIVE = True
def command(self, reset=True, wait=True, wait_all=False, quiet=False):
try:
if self._idx(reset=reset,
wait=wait,
wait_all=wait_all,
quiet=quiet):
return self._success(_('Loaded metadata index'))
else:
return self._error(_('Failed to load metadata index'))
except IOError:
return self._error(_('Failed to decrypt configuration, '
'please log in!'))
class Rescan(Command):
"""Add new messages to index"""
SYNOPSIS = (None, 'rescan', 'rescan',
'[full|vcards|vcards:<src>|both|mailboxes|sources|<msgs>]')
ORDER = ('Internals', 2)
LOG_PROGRESS = True
HTTP_CALLABLE = ('POST',)
HTTP_POST_VARS = {
'which': '[full|vcards|vcards:<src>|both|mailboxes|sources|<msgs>]'
}
def command(self, slowly=False):
session, config, idx = self.session, self.session.config, self._idx()
args = list(self.args)
if 'which' in self.data:
args.extend(self.data['which'])
# Pretend we're idle, to make rescan go fast fast.
if not slowly:
mailpile.util.LAST_USER_ACTIVITY = 0
if args and args[0].lower().startswith('vcards'):
return self._success(_('Rescanned vcards'),
result=self._rescan_vcards(session, args[0]))
elif args and args[0].lower() in ('both', 'mailboxes', 'sources',
'editable'):
which = args[0].lower()
return self._success(_('Rescanned mailboxes'),
result=self._rescan_mailboxes(session,
which=which))
elif args and args[0].lower() == 'full':
config.flush_mbox_cache(session, wait=True)
args.pop(0)
# Clear the cache first, in case the user is flailing about
ClearParseCache(full=True)
msg_idxs = self._choose_messages(args)
if msg_idxs:
for msg_idx_pos in msg_idxs:
e = Email(idx, msg_idx_pos)
try:
session.ui.mark('Re-indexing %s' % e.msg_mid())
idx.index_email(self.session, e)
except KeyboardInterrupt:
raise
except:
self._ignore_exception()
session.ui.warning(_('Failed to reindex: %s'
) % e.msg_mid())
self.event.data["messages"] = len(msg_idxs)
self.session.config.event_log.log_event(self.event)
self._background_save(index=True)
return self._success(_('Indexed %d messages') % len(msg_idxs),
result={'messages': len(msg_idxs)})
else:
# FIXME: Need a lock here?
if 'rescan' in config._running:
return self._success(_('Rescan already in progress'))
config._running['rescan'] = True
try:
results = {}
results.update(self._rescan_vcards(session, 'vcards'))
results.update(self._rescan_mailboxes(session))
self.event.data.update(results)
self.session.config.event_log.log_event(self.event)
if 'aborted' in results:
raise KeyboardInterrupt()
return self._success(_('Rescanned vcards and mailboxes'),
result=results)
except (KeyboardInterrupt), e:
return self._error(_('User aborted'), info=results)
finally:
del config._running['rescan']
def _rescan_vcards(self, session, which):
from mailpile.plugins import PluginManager
config = session.config
imported = 0
importer_cfgs = config.prefs.vcard.importers
which_spec = which.split(':')
importers = []
try:
session.ui.mark(_('Rescanning: %s') % 'vcards')
for importer in PluginManager.VCARD_IMPORTERS.values():
if (len(which_spec) > 1 and
which_spec[1] != importer.SHORT_NAME):
continue
importers.append(importer.SHORT_NAME)
for cfg in importer_cfgs.get(importer.SHORT_NAME, []):
if cfg:
imp = importer(session, cfg)
imported += imp.import_vcards(session, config.vcards)
if mailpile.util.QUITTING:
return {'vcards': imported, 'vcard_sources': importers,
'aborted': True}
except KeyboardInterrupt:
return {'vcards': imported, 'vcard_sources': importers,
'aborted': True}
return {'vcards': imported, 'vcard_sources': importers}
def _rescan_mailboxes(self, session, which='mailboxes'):
import mailpile.mail_source
config = session.config
idx = self._idx()
msg_count = 0
mbox_count = 0
rv = True
try:
session.ui.mark(_('Rescanning: %s') % which)
pre_command = config.prefs.rescan_command
if pre_command and not mailpile.util.QUITTING:
session.ui.mark(_('Running: %s') % pre_command)
try:
MakePopenUnsafe()
subprocess.check_call(pre_command, shell=True)
finally:
MakePopenSafe()
msg_count = 1
if which in ('both', 'mailboxes', 'editable'):
if which == 'editable':
mailboxes = config.get_mailboxes(mail_sources=True)
else:
mailboxes = config.get_mailboxes(mail_sources=False)
for fid, fpath, sc in mailboxes:
if mailpile.util.QUITTING:
break
if fpath == '/dev/null':
continue
try:
session.ui.mark(_('Rescanning: %s %s')
% (fid, fpath))
if which == 'editable':
count = idx.scan_mailbox(session, fid, fpath,
config.open_mailbox,
process_new=False,
editable=True,
event=self.event)
else:
count = idx.scan_mailbox(session, fid, fpath,
config.open_mailbox,
event=self.event)
except ValueError:
self._ignore_exception()
count = -1
if count < 0:
session.ui.warning(_('Failed to rescan: %s') % fpath)
elif count > 0:
msg_count += count
mbox_count += 1
session.ui.mark('\n')
if which in ('both', 'sources'):
ocount = msg_count - 1
while ocount != msg_count:
ocount = msg_count
sources = config.mail_sources.values()
sources.sort(key=lambda k: random.randint(0, 100))
for src in sources:
if mailpile.util.QUITTING:
ocount = msg_count
break
session.ui.mark(_('Rescanning: %s') % (src, ))
count = src.rescan_now(session)
if count > 0:
msg_count += count
mbox_count += 1
session.ui.mark('\n')
if not session.ui.interactive:
break
msg_count -= 1
session.ui.mark(_('Nothing changed'))
except (KeyboardInterrupt, subprocess.CalledProcessError), e:
return {'aborted': True,
'messages': msg_count,
'mailboxes': mbox_count}
finally:
if msg_count:
session.ui.mark('\n')
if msg_count < 500:
self._background_save(index=True)
else:
self._background_save(index_full=True)
return {'messages': msg_count,
'mailboxes': mbox_count}
class Optimize(Command):
"""Optimize the keyword search index"""
SYNOPSIS = (None, 'optimize', None, '[harder]')
ORDER = ('Internals', 3)
def command(self, slowly=False):
try:
if not slowly:
mailpile.util.LAST_USER_ACTIVITY = 0
self._idx().save(self.session)
GlobalPostingList.Optimize(self.session, self._idx(),
force=('harder' in self.args))
return self._success(_('Optimized search engine'))
except KeyboardInterrupt:
return self._error(_('Aborted'))
class BrowseOrLaunch(Command):
"""Launch browser and exit, if already running"""
SYNOPSIS = (None, 'browse_or_launch', None, None)
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
RAISES = (KeyboardInterrupt,)
@classmethod
def Browse(cls, sspec):
http_url = 'http://%s:%s/' % sspec
try:
MakePopenUnsafe()
webbrowser.open(http_url)
return http_url
finally:
MakePopenSafe()
return False
def command(self):
config = self.session.config
if config.http_worker:
sspec = config.http_worker.sspec
else:
sspec = (config.sys.http_host, config.sys.http_port)
try:
socket.create_connection(sspec)
self.Browse(sspec)
os._exit(1)
except IOError:
pass
return self._success(_('Launching Mailpile'), result=True)
class RunWWW(Command):
"""Just run the web server"""
SYNOPSIS = (None, 'www', None, '[<host:port>]')
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
def command(self):
config = self.session.config
if self.args:
sspec = self.args[0].split(':', 1)
sspec[1] = int(sspec[1])
else:
sspec = (config.sys.http_host, config.sys.http_port)
self.session.config.prepare_workers(self.session,
httpd_spec=tuple(sspec),
daemons=True)
if config.http_worker:
http_url = 'http://%s:%s/' % config.http_worker.httpd.sspec
return self._success(_('Started the web server on %s') % http_url)
else:
return self._error(_('Failed to start the web server'))
class WritePID(Command):
"""Write the PID to a file"""
SYNOPSIS = (None, 'pidfile', None, "</path/to/pidfile>")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
SPLIT_ARG = False
def command(self):
with open(self.args[0], 'w') as fd:
fd.write('%d' % os.getpid())
return self._success(_('Wrote PID to %s') % self.args)
class RenderPage(Command):
"""Does nothing, for use by semi-static jinja2 pages"""
SYNOPSIS = (None, None, 'page', None)
ORDER = ('Internals', 6)
CONFIG_REQUIRED = False
SPLIT_ARG = False
HTTP_STRICT_VARS = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
if self.result and 'path' in self.result:
self.template_id = 'page/' + self.result['path'] + '/index'
def command(self):
return self._success(_('Rendered the page'), result={
'path': (self.args and self.args[0] or ''),
'data': self.data
})
class ProgramStatus(Command):
"""Display list of running threads, locks and outstanding events."""
SYNOPSIS = (None, 'ps', 'ps', None)
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = False
LOG_NOTHING = True
class CommandResult(Command.CommandResult):
def as_text(self):
now = time.time()
sessions = self.result.get('sessions')
if sessions:
sessions = '\n'.join(sorted([' %s/%s = %s (%ds)'
% (us['sessionid'],
us['userdata'],
us['userinfo'],
now - us['timestamp'])
for us in sessions]))
else:
sessions = ' ' + _('Nothing Found')
ievents = self.result.get('ievents')
cevents = self.result.get('cevents')
if cevents:
cevents = '\n'.join([' %s %s' % (e.event_id, e.message)
for e in cevents])
else:
cevents = ' ' + _('Nothing Found')
ievents = self.result.get('ievents')
if ievents:
ievents = '\n'.join([' %s:%s %s' % (e.event_id,
e.flags,
e.message)
for e in ievents])
else:
ievents = ' ' + _('Nothing Found')
threads = self.result.get('threads')
if threads:
threads = '\n'.join(sorted([(' ' + str(t)) for t in threads]))
else:
threads = _('Nothing Found')
locks = self.result.get('locks')
if locks:
locks = '\n'.join(sorted([(' %s.%s is %slocked'
) % (l[0], l[1],
'' if l[2] else 'un')
for l in locks]))
else:
locks = _('Nothing Found')
return ('Recent events:\n%s\n\n'
'Events in progress:\n%s\n\n'
'Live sessions:\n%s\n\n'
'Postinglist timers:\n%s\n\n'
'Threads: (bg delay %.3fs, live=%s, httpd=%s)\n%s\n\n'
'Locks:\n%s'
) % (cevents, ievents, sessions,
self.result['pl_timers'],
self.result['delay'],
self.result['live'],
self.result['httpd'],
threads, locks)
def command(self, args=None):
import mailpile.auth
import mailpile.mail_source
import mailpile.plugins.compose
import mailpile.plugins.contacts
config = self.session.config
try:
idx = config.index
locks = [
('config.index', '_lock', idx._lock._is_owned()),
('config.index', '_save_lock', idx._save_lock._is_owned())
]
except AttributeError:
locks = []
if config.vcards:
locks.extend([
('config.vcards', '_lock', config.vcards._lock._is_owned()),
])
locks.extend([
('config', '_lock', config._lock._is_owned()),
('mailpile.postinglist', 'GLOBAL_POSTING_LOCK',
mailpile.postinglist.GLOBAL_POSTING_LOCK._is_owned()),
('mailpile.postinglist', 'GLOBAL_OPTIMIZE_LOCK',
mailpile.postinglist.GLOBAL_OPTIMIZE_LOCK.locked()),
('mailpile.plugins.compose', 'GLOBAL_EDITING_LOCK',
mailpile.plugins.compose.GLOBAL_EDITING_LOCK._is_owned()),
('mailpile.plugins.contacts', 'GLOBAL_VCARD_LOCK',
mailpile.plugins.contacts.GLOBAL_VCARD_LOCK._is_owned()),
('mailpile.postinglist', 'GLOBAL_GPL_LOCK',
mailpile.postinglist.GLOBAL_GPL_LOCK._is_owned()),
])
threads = threading.enumerate()
for thread in threads:
try:
if hasattr(thread, 'lock'):
locks.append([thread, 'lock', thread.lock])
if hasattr(thread, '_lock'):
locks.append([thread, '_lock', thread._lock])
if locks and hasattr(locks[-1][-1], 'locked'):
locks[-1][-1] = locks[-1][-1].locked()
elif locks and hasattr(locks[-1][-1], '_is_owned'):
locks[-1][-1] = locks[-1][-1]._is_owned()
except AttributeError:
pass
import mailpile.auth
import mailpile.httpd
result = {
'sessions': [{'sessionid': k,
'timestamp': v.ts,
'userdata': v.data,
'userinfo': v.auth} for k, v in
mailpile.auth.SESSION_CACHE.iteritems()],
'pl_timers': mailpile.postinglist.TIMERS,
'delay': play_nice_with_threads(sleep=False),
'live': mailpile.util.LIVE_USER_ACTIVITIES,
'httpd': mailpile.httpd.LIVE_HTTP_REQUESTS,
'threads': threads,
'locks': sorted(locks)
}
if config.event_log:
result.update({
'cevents': list(config.event_log.events(flag='c'))[-10:],
'ievents': config.event_log.incomplete(),
})
return self._success(_("Listed events, threads, and locks"),
result=result)
class ListDir(Command):
"""Display working directory listing"""
SYNOPSIS = (None, 'ls', None, "<.../new/path/...>")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def as_text(self):
if self.result:
lines = []
for fn, sz, isdir in self.result:
lines.append(('%10.10s %s%s'
) % (sz, fn, isdir and '/' or ''))
return '\n'.join(lines)
else:
return _('Nothing Found')
def command(self, args=None):
args = list((args is None) and self.args or args or [])
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
try:
file_list = [(f.decode('utf-8'),
os.path.getsize(f),
os.path.isdir(f))
for f in os.listdir('.') if not f.startswith('.')
and (not args or [a for a in args if a in f])]
file_list.sort(key=lambda i: i[0].lower())
return self._success(_('Current directory is %s') % os.getcwd(),
result=file_list)
except (OSError, IOError, UnicodeDecodeError), e:
return self._error(_('Failed to list directory: %s') % e)
class ChangeDir(ListDir):
"""Change working directory"""
SYNOPSIS = (None, 'cd', None, "<.../new/path/...>")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
def command(self, args=None):
args = list((args is None) and self.args or args or [])
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
try:
os.chdir(args.pop(0).encode('utf-8'))
return ListDir.command(self, args=args)
except (OSError, IOError, UnicodeEncodeError), e:
return self._error(_('Failed to change directories: %s') % e)
class CatFile(Command):
"""Dump the contents of a file, decrypting if necessary"""
SYNOPSIS = (None, 'cat', None, "</path/to/file> [>/path/to/output]")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def as_text(self):
if isinstance(self.result, list):
return ''.join(self.result)
else:
return ''
def command(self, args=None):
lines = []
files = list(args or self.args)
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
target = tfd = None
if files and files[-1] and files[-1][:1] == '>':
target = files.pop(-1)[1:]
if os.path.exists(target):
return self._error(_('That file already exists: %s'
) % target)
tfd = open(target, 'wb')
cb = lambda ll: [tfd.write(l) for l in ll]
else:
cb = lambda ll: lines.extend((l.decode('utf-8') for l in ll))
for fn in files:
with open(fn, 'r') as fd:
decrypt_and_parse_lines(fd, cb, self.session.config,
newlines=True, decode=None)
if tfd:
tfd.close()
return self._success(_('Dumped to %s: %s'
) % (target, ', '.join(files)))
else:
return self._success(_('Dumped: %s') % ', '.join(files),
result=lines)
##[ Configuration commands ]###################################################
class ConfigSet(Command):
"""Change a setting"""
SYNOPSIS = ('S', 'set', 'settings/set', '<section.variable> <value>')
ORDER = ('Config', 1)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
SPLIT_ARG = False
HTTP_CALLABLE = ('POST', 'UPDATE')
HTTP_STRICT_VARS = False
HTTP_POST_VARS = {
'_section': 'common section, create if needed',
'section.variable': 'value|json-string'
}
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
config = self.session.config
args = list(self.args)
ops = []
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
if not config.loaded_config:
self.session.ui.warning(_('WARNING: Any changes will '
'be overwritten on login'))
section = self.data.get('_section', [''])[0]
if section:
# Make sure section exists
ops.append((section, '!CREATE_SECTION'))
for var in self.data.keys():
if var in ('_section', '_method'):
continue
sep = '/' if ('/' in (section+var)) else '.'
svar = (section+sep+var) if section else var
parts = svar.split(sep)
if parts[0] in config.rules:
if svar.endswith('[]'):
ops.append((svar[:-2], json.dumps(self.data[var])))
else:
ops.append((svar, self.data[var][0]))
else:
raise ValueError(_('Invalid section or variable: %s') % var)
if self.args:
arg = ' '.join(self.args)
if '=' in arg:
# Backwards compatibility with the old 'var = value' syntax.
var, value = [s.strip() for s in arg.split('=', 1)]
var = var.replace(': ', '.').replace(':', '.').replace(' ', '')
else:
var, value = arg.split(' ', 1)
ops.append((var, value))
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
updated = {}
for path, value in ops:
value = value.strip()
if value[:1] in ('{', '[') and value[-1:] in (']', '}'):
value = json.loads(value)
try:
try:
cfg, var = config.walk(path.strip(), parent=1)
if value == '!CREATE_SECTION':
if var not in cfg:
cfg[var] = {}
else:
cfg[var] = value
updated[path] = value
except IndexError:
cfg, v1, v2 = config.walk(path.strip(), parent=2)
cfg[v1] = {v2: value}
except TypeError:
raise ValueError('Could not set variable: %s' % path)
if config.loaded_config:
self._background_save(config=True)
return self._success(_('Updated your settings'), result=updated)
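# Illustrative CLI usage (variable names assumed to exist in the config rules):
#
#   set prefs.num_results 50
#   set sys.http_port 33411
#
# Over HTTP the same change is a POST of 'section.variable=value' pairs to the
# settings/set endpoint declared in SYNOPSIS.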
class ConfigAdd(Command):
"""Add a new value to a list (or ordered dict) setting"""
SYNOPSIS = (None, 'append', 'settings/add', '<section.variable> <value>')
ORDER = ('Config', 1)
SPLIT_ARG = False
HTTP_CALLABLE = ('POST', 'UPDATE')
HTTP_STRICT_VARS = False
HTTP_POST_VARS = {
'section.variable': 'value|json-string',
}
IS_USER_ACTIVITY = True
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
config = self.session.config
ops = []
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
for var in self.data.keys():
parts = ('.' in var) and var.split('.') or var.split('/')
if parts[0] in config.rules:
ops.append((var, self.data[var][0]))
if self.args:
arg = ' '.join(self.args)
if '=' in arg:
# Backwards compatible with the old 'var = value' syntax.
var, value = [s.strip() for s in arg.split('=', 1)]
var = var.replace(': ', '.').replace(':', '.').replace(' ', '')
else:
var, value = arg.split(' ', 1)
ops.append((var, value))
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
updated = {}
for path, value in ops:
value = value.strip()
if value.startswith('{') or value.startswith('['):
value = json.loads(value)
cfg, var = config.walk(path.strip(), parent=1)
cfg[var].append(value)
updated[path] = value
if updated:
self._background_save(config=True)
return self._success(_('Updated your settings'), result=updated)
class ConfigUnset(Command):
"""Reset one or more settings to their defaults"""
SYNOPSIS = ('U', 'unset', 'settings/unset', '<var>')
ORDER = ('Config', 2)
HTTP_CALLABLE = ('POST', )
HTTP_POST_VARS = {
'var': 'section.variables'
}
IS_USER_ACTIVITY = True
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
session, config = self.session, self.session.config
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
def unset(cfg, key):
if isinstance(cfg[key], dict):
if '_any' in cfg[key].rules:
for skey in cfg[key].keys():
del cfg[key][skey]
else:
for skey in cfg[key].keys():
unset(cfg[key], skey)
elif isinstance(cfg[key], list):
cfg[key] = []
else:
del cfg[key]
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
updated = []
vlist = list(self.args) + (self.data.get('var', None) or [])
for v in vlist:
cfg, vn = config.walk(v, parent=True)
unset(cfg, vn)
updated.append(v)
if updated:
self._background_save(config=True)
return self._success(_('Reset to default values'), result=updated)
class ConfigPrint(Command):
"""Print one or more settings"""
SYNOPSIS = ('P', 'print', 'settings', '[-short|-secrets|-flat] <var>')
ORDER = ('Config', 3)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = False
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = {
'var': 'section.variable',
'short': 'Set True to omit unchanged values (defaults)',
'secrets': 'Set True to show passwords and other secrets'
}
HTTP_POST_VARS = {
'user': 'Authenticate as user',
'pass': 'Authenticate with password'
}
def _maybe_all(self, list_all, data, key_types, recurse, sanitize):
if isinstance(data, (dict, list)) and list_all:
rv = {}
for key in data.all_keys():
if [t for t in data.key_types(key) if t not in key_types]:
# Silently omit things that are considered sensitive
continue
rv[key] = data[key]
if hasattr(rv[key], 'all_keys'):
if recurse:
rv[key] = self._maybe_all(True, rv[key], key_types,
recurse, sanitize)
else:
if 'name' in rv[key]:
rv[key] = '{ ..(%s).. }' % rv[key]['name']
elif 'description' in rv[key]:
rv[key] = '{ ..(%s).. }' % rv[key]['description']
elif 'host' in rv[key]:
rv[key] = '{ ..(%s).. }' % rv[key]['host']
else:
rv[key] = '{ ... }'
elif sanitize and key.lower()[:4] in ('pass', 'secr'):
rv[key] = '(SUPPRESSED)'
return rv
return data
def command(self):
session, config = self.session, self.session.config
result = {}
invalid = []
args = list(self.args)
recurse = not self.data.get('flat', ['-flat' in args])[0]
list_all = not self.data.get('short', ['-short' in args])[0]
sanitize = not self.data.get('secrets', ['-secrets' in args])[0]
# FIXME: Shouldn't we suppress critical variables as well?
key_types = ['public', 'critical']
access_denied = False
if self.data.get('_method') == 'POST':
if 'pass' in self.data:
from mailpile.auth import CheckPassword
password = self.data['pass'][0]
auth_user = CheckPassword(config,
self.data.get('user', [None])[0],
password)
if auth_user == 'DEFAULT':
key_types += ['key']
result['_auth_user'] = auth_user
result['_auth_pass'] = password
for key in (args + self.data.get('var', [])):
if key in ('-short', '-flat', '-secrets'):
continue
try:
data = config.walk(key, key_types=key_types)
result[key] = self._maybe_all(list_all, data, key_types,
recurse, sanitize)
except AccessError:
access_denied = True
invalid.append(key)
except KeyError:
invalid.append(key)
if invalid:
return self._error(_('Invalid keys'),
result=result, info={
'keys': invalid,
'key_types': key_types,
'access_denied': access_denied
})
else:
return self._success(_('Displayed settings'), result=result)
class AddMailboxes(Command):
"""Add one or more mailboxes"""
SYNOPSIS = ('A', 'add', None, '<path/to/mailbox>')
ORDER = ('Config', 4)
SPLIT_ARG = False
HTTP_CALLABLE = ('POST', 'UPDATE')
IS_USER_ACTIVITY = True
MAX_PATHS = 50000
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
session, config = self.session, self.session.config
adding = []
existing = config.sys.mailbox
paths = list(self.args)
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
try:
while paths:
raw_fn = paths.pop(0)
fn = os.path.normpath(os.path.expanduser(raw_fn))
fn = os.path.abspath(fn)
if raw_fn in existing or fn in existing:
session.ui.warning('Already in the pile: %s' % raw_fn)
elif raw_fn.startswith("imap://"):
adding.append(raw_fn)
elif IsMailbox(fn, config):
adding.append(raw_fn)
elif os.path.exists(fn) and os.path.isdir(fn):
session.ui.mark('Scanning %s for mailboxes' % fn)
try:
for f in [f for f in os.listdir(fn)
if not f.startswith('.')]:
paths.append(os.path.join(fn, f))
if len(paths) > self.MAX_PATHS:
return self._error(_('Too many files'))
except OSError:
if raw_fn in self.args:
return self._error(_('Failed to read: %s'
) % raw_fn)
elif raw_fn in self.args:
return self._error(_('No such file or directory: %s'
) % raw_fn)
except KeyboardInterrupt:
return self._error(_('User aborted'))
added = {}
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
for arg in adding:
added[config.sys.mailbox.append(arg)] = arg
if added:
self._background_save(config=True)
return self._success(_('Added %d mailboxes') % len(added),
result={'added': added})
else:
return self._success(_('Nothing was added'))
###############################################################################
class Cached(Command):
"""Fetch results from the command cache."""
SYNOPSIS = (None, 'cached', 'cached', '[<cache-id>]')
ORDER = ('Internals', 7)
HTTP_QUERY_VARS = {'id': 'Cache ID of command to redisplay'}
IS_USER_ACTIVITY = False
LOG_NOTHING = True
def run(self):
try:
cid = self.args[0] if self.args else self.data.get('id', [None])[0]
rv = self.session.config.command_cache.get_result(cid)
self.session.copy(rv.session)
return rv
except:
self._starting()
self._ignore_exception()
self._error(self.FAILURE % {'name': self.name,
'args': ' '.join(self.args)})
return self._finishing(self, False)
class Output(Command):
"""Choose format for command results."""
SYNOPSIS = (None, 'output', None, '[json|text|html|<template>.html|...]')
ORDER = ('Internals', 7)
CONFIG_REQUIRED = False
HTTP_STRICT_VARS = False
HTTP_AUTH_REQUIRED = False
IS_USER_ACTIVITY = False
LOG_NOTHING = True
def etag_data(self):
return self.get_render_mode()
def max_age(self):
return 364 * 24 * 3600 # A long time!
def get_render_mode(self):
return self.args and self.args[0] or self.session.ui.render_mode
def command(self):
m = self.session.ui.render_mode = self.get_render_mode()
return self._success(_('Set output mode to: %s') % m,
result={'output': m})
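# Illustrative: 'output json' makes subsequent results render as JSON and
# 'output text' switches back; the chosen mode is simply stored on
# session.ui.render_mode for later commands to consult.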
class Pipe(Command):
"""Pipe a command to a shell command, file or e-mail"""
SYNOPSIS = (None, 'pipe', None,
"[e@mail.com|command|>filename] -- [<cmd> [args ... ]]")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
def command(self):
if '--' in self.args:
dashdash = self.args.index('--')
target = self.args[0:dashdash]
command, args = self.args[dashdash+1], self.args[dashdash+2:]
else:
target, command, args = [self.args[0]], self.args[1], self.args[2:]
output = ''
result = None
old_ui = self.session.ui
try:
from mailpile.ui import CapturingUserInteraction as CUI
self.session.ui = capture = CUI(self.session.config)
capture.render_mode = old_ui.render_mode
result = Action(self.session, command, ' '.join(args))
capture.display_result(result)
output = capture.captured
finally:
self.session.ui = old_ui
if target[0].startswith('>'):
t = ' '.join(target)
if t[0] == '>':
t = t[1:]
with open(t.strip(), 'w') as fd:
fd.write(output.encode('utf-8'))
elif '@' in target[0]:
from mailpile.plugins.compose import Compose
body = 'Result as %s:\n%s' % (capture.render_mode, output)
if capture.render_mode != 'json' and output[0] not in ('{', '['):
body += '\n\nResult as JSON:\n%s' % result.as_json()
composer = Compose(self.session, data={
'to': target,
'subject': ['Mailpile: %s %s' % (command, ' '.join(args))],
'body': [body]
})
return self._success('Mailing output to %s' % ', '.join(target),
result=composer.run())
else:
try:
self.session.ui.block()
MakePopenUnsafe()
kid = subprocess.Popen(target, shell=True, stdin=subprocess.PIPE)
rv = kid.communicate(input=output.encode('utf-8'))
finally:
self.session.ui.unblock()
MakePopenSafe()
kid.wait()
if kid.returncode != 0:
return self._error('Error piping to %s' % (target, ),
info={'stderr': rv[1], 'stdout': rv[0]})
return self._success('Wrote %d bytes to %s'
% (len(output), ' '.join(target)))
class Quit(Command):
"""Exit Mailpile, normal shutdown"""
SYNOPSIS = ("q", "quit", "quitquitquit", None)
ABOUT = ("Quit mailpile")
ORDER = ("Internals", 2)
CONFIG_REQUIRED = False
RAISES = (KeyboardInterrupt,)
def command(self):
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
mailpile.util.QUITTING = True
self._background_save(index=True, config=True, wait=True)
try:
import signal
os.kill(mailpile.util.MAIN_PID, signal.SIGINT)
except:
def exiter():
time.sleep(1)
os._exit(0)
threading.Thread(target=exiter).start()
return self._success(_('Shutting down...'))
class TrustingQQQ(Command):
"""Allow anybody to quit the app"""
SYNOPSIS = (None, "trustingqqq", None, None)
def command(self):
# FIXME: This is a hack to allow Windows deployments to shut
# down cleanly. Eventually this will take an argument
# specifying a random token that the launcher chooses.
Quit.HTTP_AUTH_REQUIRED = False
return self._success('OK, anybody can quit!')
class Abort(Command):
"""Force exit Mailpile (kills threads)"""
SYNOPSIS = (None, "quit/abort", "abortabortabort", None)
ABOUT = ("Quit mailpile")
ORDER = ("Internals", 2)
CONFIG_REQUIRED = False
HTTP_QUERY_VARS = {
'no_save': 'Do not try to save state'
}
def command(self):
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
mailpile.util.QUITTING = True
if 'no_save' not in self.data:
self._background_save(index=True, config=True, wait=True,
wait_callback=lambda: os._exit(1))
else:
os._exit(1)
return self._success(_('Shutting down...'))
class Help(Command):
"""Print help on Mailpile or individual commands."""
SYNOPSIS = ('h', 'help', 'help', '[<command-group>]')
ABOUT = ('This is Mailpile!')
ORDER = ('Config', 9)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def splash_as_text(self):
text = [
self.result['splash']
]
if self.result['http_url']:
text.append(_('The Web interface address is: %s'
) % self.result['http_url'])
else:
text.append(_('The Web interface is disabled.'))
text.append('')
b = ' * '
if self.result['interactive']:
text.append(b + _('Type `help` for instructions or `quit` '
'to quit.'))
text.append(b + _('Long running operations can be aborted '
'by pressing: <CTRL-C>'))
if self.result['login_cmd'] and self.result['interactive']:
text.append(b + _('You can log in using the `%s` command.'
) % self.result['login_cmd'])
if self.result['in_browser']:
text.append(b + _('Check your web browser!'))
return '\n'.join(text)
def variables_as_text(self):
text = []
for group in self.result['variables']:
text.append(group['name'])
for var in group['variables']:
sep = ('=' in var['type']) and ': ' or ' = '
text.append((' %-35s %s'
) % (('%s%s<%s>'
) % (var['var'], sep,
var['type'].replace('=', '> = <')),
var['desc']))
text.append('')
return '\n'.join(text)
def commands_as_text(self):
text = [_('Commands:')]
last_rank = None
cmds = self.result['commands']
width = self.result.get('width', 8)
ckeys = cmds.keys()
ckeys.sort(key=lambda k: (cmds[k][3], cmds[k][0]))
arg_width = min(50, max(14, self.session.ui.term.max_width()-70))
for c in ckeys:
cmd, args, explanation, rank = cmds[c]
if not rank or not cmd:
continue
if last_rank and int(rank / 10) != last_rank:
text.append('')
last_rank = int(rank / 10)
if c[0] == '_':
c = ' '
else:
c = '%s|' % c[0]
fmt = ' %%s%%-%d.%ds' % (width, width)
if explanation:
if len(args or '') <= arg_width:
fmt += ' %%-%d.%ds %%s' % (arg_width, arg_width)
else:
pad = len(c) + width + 3 + arg_width
fmt += ' %%s\n%s %%s' % (' ' * pad)
else:
explanation = ''
fmt += ' %s %s '
text.append(fmt % (c, cmd.replace('=', ''),
args and ('%s' % (args, )) or '',
(explanation.splitlines() or [''])[0]))
if self.result.get('tags'):
text.extend([
'',
_('Tags: (use a tag as a command to display tagged '
'messages)'),
'',
self.result['tags'].as_text()
])
return '\n'.join(text)
def as_text(self):
if not self.result:
return _('Error')
return ''.join([
('splash' in self.result) and self.splash_as_text() or '',
(('variables' in self.result) and self.variables_as_text()
or ''),
('commands' in self.result) and self.commands_as_text() or '',
])
def command(self):
config = self.session.config
self.session.ui.reset_marks(quiet=True)
if self.args:
command = self.args[0]
for cls in COMMANDS:
name = cls.SYNOPSIS[1] or cls.SYNOPSIS[2]
width = len(name or '')
if name and name == command:
order = 1
cmd_list = {'_main': (name, cls.SYNOPSIS[3],
cls.__doc__, order)}
subs = [c for c in COMMANDS
if (c.SYNOPSIS[1] or c.SYNOPSIS[2] or ''
).startswith(name + '/')]
for scls in sorted(subs):
sc, scmd, surl, ssynopsis = scls.SYNOPSIS[:4]
order += 1
cmd_list['_%s' % scmd] = (scmd, ssynopsis,
scls.__doc__, order)
width = max(len(scmd or surl), width)
return self._success(_('Displayed help'), result={
'pre': cls.__doc__,
'commands': cmd_list,
'width': width
})
return self._error(_('Unknown command'))
else:
cmd_list = {}
count = 0
for grp in COMMAND_GROUPS:
count += 10
for cls in COMMANDS:
if cls.CONFIG_REQUIRED and not config.loaded_config:
continue
c, name, url, synopsis = cls.SYNOPSIS[:4]
if cls.ORDER[0] == grp and '/' not in (name or ''):
cmd_list[c or '_%s' % name] = (name, synopsis,
cls.__doc__,
count + cls.ORDER[1])
if config.loaded_config:
tags = GetCommand('tags')(self.session).run()
else:
tags = {}
try:
index = self._idx()
except IOError:
index = None
return self._success(_('Displayed help'), result={
'commands': cmd_list,
'tags': tags,
'index': index
})
def _starting(self):
pass
def _finishing(self, command, rv, *args, **kwargs):
return self.CommandResult(self, self.session, self.name,
command.__doc__ or self.__doc__, rv,
self.status, self.message)
class HelpVars(Help):
"""Print help on Mailpile variables"""
SYNOPSIS = (None, 'help/variables', 'help/variables', None)
ABOUT = ('The available mailpile variables')
ORDER = ('Config', 9)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
def command(self):
config = self.session.config.rules
result = []
categories = ["sys", "prefs", "profiles"]
for cat in categories:
variables = []
what = config[cat]
if isinstance(what[2], dict):
for ii, i in what[2].iteritems():
variables.append({
'var': ii,
'type': str(i[1]),
'desc': i[0]
})
variables.sort(key=lambda k: k['var'])
result.append({
'category': cat,
'name': config[cat][0],
'variables': variables
})
result.sort(key=lambda k: config[k['category']][0])
return self._success(_('Displayed variables'),
result={'variables': result})
class HelpSplash(Help):
"""Print Mailpile splash screen"""
SYNOPSIS = (None, 'help/splash', 'help/splash', None)
ORDER = ('Config', 9)
CONFIG_REQUIRED = False
def command(self, interactive=True):
from mailpile.auth import Authenticate
http_worker = self.session.config.http_worker
in_browser = False
if http_worker:
http_url = 'http://%s:%s/' % http_worker.httpd.sspec
if ((sys.platform[:3] in ('dar', 'win') or os.getenv('DISPLAY'))
and self.session.config.prefs.open_in_browser):
if BrowseOrLaunch.Browse(http_worker.httpd.sspec):
in_browser = True
time.sleep(2)
else:
http_url = ''
return self._success(_('Displayed welcome message'), result={
'splash': self.ABOUT,
'http_url': http_url,
'in_browser': in_browser,
'login_cmd': (Authenticate.SYNOPSIS[1]
if not self.session.config.loaded_config else ''),
'interactive': interactive
})
def GetCommand(name):
match = [c for c in COMMANDS if name in c.SYNOPSIS[:3]]
if len(match) == 1:
return match[0]
return None
def Action(session, opt, arg, data=None):
session.ui.reset_marks(quiet=True)
config = session.config
if not opt:
return Help(session, 'help').run()
# Use the COMMANDS dict by default.
command = GetCommand(opt)
if command:
return command(session, opt, arg, data=data).run()
# Tags are commands
if config.loaded_config:
tag = config.get_tag(opt)
if tag:
a = 'in:%s%s%s' % (tag.slug, ' ' if arg else '', arg)
return GetCommand('search')(session, opt, arg=a, data=data).run()
# OK, give up!
raise UsageError(_('Unknown command: %s') % opt)
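# Dispatch order in Action(): an explicit command from COMMANDS always wins; if
# none matches and the option names a tag, the call is rewritten into a search
# (e.g. a hypothetical tag "inbox" with args "is:unread" becomes the query
# "in:inbox is:unread"); otherwise a UsageError is raised.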
# Commands starting with _ don't get single-letter shortcodes...
COMMANDS = [
Load, Optimize, Rescan, BrowseOrLaunch, RunWWW, ProgramStatus,
ListDir, ChangeDir, CatFile,
WritePID, ConfigPrint, ConfigSet, ConfigAdd, ConfigUnset, AddMailboxes,
RenderPage, Cached, Output, Pipe,
Help, HelpVars, HelpSplash, Quit, TrustingQQQ, Abort
]
COMMAND_GROUPS = ['Internals', 'Config', 'Searching', 'Tagging', 'Composing']
|
motion_led.py
|
import modules.constants.color_constants as color # Contains different color constants as tuples
import datetime
import schedule
import sys
import threading
from modules.constants.constants import * # Local constants file
from modules.request_handler import * # Local module file
from modules.routines import * # Local module file
from modules.state_changer import * # Local module file
from gpiozero import MotionSensor
from time import sleep
# Set up the motion sensor on GPIO_PIN
pir: MotionSensor = MotionSensor(GPIO_PIN)
def setStateFromMotionLED() -> None:
"""
Sets the LED "on" or "off" state based on the pir sensor reading
:return: None
"""
while True:
        # False-positive guard: poll the sensor once per second, up to MOTION_DETECT_THRESHOLD times
for i in range(MOTION_DETECT_THRESHOLD):
if pir.motion_detected:
break
sleep(1)
if pir.motion_detected:
            # Motion detected: turn the LED on and wait until motion stops
print(datetime.datetime.now().strftime("%X"), ": Motion detected!")
setLED("turn", "on")
pir.wait_for_no_motion()
elif not pir.motion_detected:
print(datetime.datetime.now().strftime("%X"), ": No motion detected!")
setLED("turn", "off")
pir.wait_for_motion()
def setStateFromRoutineLED() -> None:
"""
Sets the LED state based on a schedule/routine
:return: None
"""
schedule.every().day.at(WAKE_UP_TIME).do(wakeUpRoutine)
while True:
schedule.run_pending()
sleep(1)
def main():
motion_state_t = threading.Thread(target=setStateFromMotionLED, daemon=True)
motion_state_t.start()
routine_t = threading.Thread(target=setStateFromRoutineLED, daemon=True)
routine_t.start()
routine_t.join()
motion_state_t.join()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\nInterrupted. Exiting...")
sys.exit(0)
|
server.py
|
import asyncio
import json
import websockets
import random, string
import time
import threading
import math
global all_user
global parties
global rooms
threads = []
all_user = dict() #users online
parties = dict() #parties available
nr = 4  # number of participants in one room
avb = 0 #no of rooms
rooms = dict()
rooms[avb] = {"participants": 0}
available_room = "room_"+str(avb)+".txt"
async def register(ws, message):
global avb
global available_room
data = json.loads(message.decode('UTF-8'))
if rooms[avb]["participants"] < nr:
pID = data["pID"]
rooms[avb]["participants"] = rooms[avb]["participants"] + 1
message = "a"
if rooms[avb]["participants"] <= nr/2:
if rooms[avb]["participants"] == 1:
rooms[avb][pID] = {"ws": ws, "x": "0", "y": "0", "rot": "0", "team": "1", "role": "in_chair", "blocks": [], "bits": [], "cur": 0, "backup": 0, "fake": 0}
cong = {"action": "entered_room", "team": "1", "role": "in_chair", "room": avb}
cong = json.dumps(cong)
await ws.send(cong)
message = {"action": "joined_room", "joined_room": pID, "team": "1", "role": "in_chair"}
message = json.dumps(message)
else:
rooms[avb][pID] = {"ws": ws, "x": "0", "y": "0", "rot": "0", "team": "1", "role": "field_player", "bits": 0, "cur": 0, "bit_size": 0, "hack": -1}
cong = {"action": "entered_room", "team": "1", "role": "field_player", "room": avb}
cong = json.dumps(cong)
await ws.send(cong)
message = {"action": "joined_room", "joined_room": pID, "team": "1", "role": "field_player"}
message = json.dumps(message)
else:
if rooms[avb]["participants"] == 3:
rooms[avb][pID] = {"ws": ws, "x": "0", "y": "0", "rot": "0", "team": "2", "role": "in_chair", "blocks": [], "bits": [], "cur": 0, "backup": 0, "fake": 0}
cong = {"action": "entered_room", "team": "2", "role": "in_chair", "room": avb}
cong = json.dumps(cong)
await ws.send(cong)
message = {"action": "joined_room", "joined_room": pID, "team": "2", "role": "in_chair"}
message = json.dumps(message)
else:
rooms[avb][pID] = {"ws": ws, "x": "0", "y": "0", "rot": "0", "team": "2", "role": "field_player", "bits": 0, "cur": 0, "bit_size": 0, "hack": -1}
cong = {"action": "entered_room", "team": "2", "role": "field_player", "room": avb}
cong = json.dumps(cong)
await ws.send(cong)
message = {"action": "joined_room", "joined_room": pID, "team": "2", "role": "field_player"}
message = json.dumps(message)
users = []
if rooms[avb]["participants"] > 1:
for key in rooms[avb].keys():
if key != "participants" and key != pID:
users.append(key)
data = {"action": "in_room", "in_room": key, "team": rooms[avb][key]["team"], "role": rooms[avb][key]["role"]}
data = json.dumps(data)
await ws.send(data)
await asyncio.wait([all_user[user]['ws'].send(message) for user in all_user if user in users])
else:
avb = avb+1
pID = data["pID"]
rooms[avb] = dict()
rooms[avb][pID] = {"ws": ws, "x": "0", "y": "0", "rot": "0", "team": "1", "role": "in_chair", "blocks": [], "bits": [], "cur": 0, "backup": 0, "fake": 0}
rooms[avb]["participants"] = 1
cong = {"action": "entered_room", "team": "1", "role": "in_chair", "room": avb}
cong = json.dumps(cong)
await ws.send(cong)
async def move_state(data):
pid = data['pID']
users = []
for key in rooms[data["room"]].keys():
if key != "participants" and key != data["pID"]:
users.append(key)
data = json.dumps(data)
temp = [all_user[user]['ws'].send(data) for user in all_user if user in users]
if len(temp) > 0:
await asyncio.wait(temp)
async def respawn(data):
d = {"action": "respawn"}
d = json.dumps(d)
users = []
for key in rooms[data["room"]].keys():
if key != "participants" and key != data["pID"]:
users.append(key)
await asyncio.wait([all_user[user]['ws'].send(d) for user in all_user if user in users])
async def eject(data):
pid = data["pID"]
for key in rooms[data["room"]].keys():
if key != "participants":
if rooms[data["room"]][key]["team"] == data["team"] and rooms[data["room"]][key]["role"] == "in_chair":
if "fake" in data.keys():
if data["fake"] == data["team"]:
rooms[data["room"]][key]["fake"] = 1
rooms[data["room"]][key]["bits"].append(rooms[data["room"]][pid]["bit_size"])
rooms[data["room"]][key]["cur"] += rooms[data["room"]][pid]["cur"]
message = {"action": "eject", "bit_size": rooms[data["room"]][pid]["bit_size"], "cur": rooms[data["room"]][pid]["cur"]}
message = json.dumps(message)
await all_user[key]["ws"].send(message)
break
rooms[data["room"]][pid]["bits"] = 0
rooms[data["room"]][pid]["bit_size"] = 0
rooms[data["room"]][pid]["cur"] = 0
message = {"action": "eject", "bit_size": 0, "cur": 0, "bits": 0}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
async def create(data):
pid = data["pID"]
for key in rooms[data["room"]].keys():
if key != "participants":
if rooms[data["room"]][key]["team"] == data["team"] and rooms[data["room"]][key]["role"] == "in_chair" and rooms[data["room"]][key]["cur"] > 0:
if rooms[data["room"]][key]["fake"] == 1:
rooms[data["room"]][key]["blocks"] = []
message = {"action": "shit", "blocks": []}
message = json.dumps(message)
await all_user[key]['ws'].send(message)
break
rooms[data["room"]][key]["blocks"].append(rooms[data["room"]][key]["bits"][0])
rooms[data["room"]][key]["bits"].pop(0)
rooms[data["room"]][key]["cur"] -= 1
message = {"action": "create", "bits": rooms[data["room"]][key]["bits"], "cur": rooms[data["room"]][key]["cur"], "blocks": rooms[data["room"]][key]["blocks"]}
message = json.dumps(message)
await all_user[key]["ws"].send(message)
break
message = {"action": "create"}
message = json.dumps(message)
await all_user[pid]['ws'].send(message)
async def hack(data):
pid = data["pID"]
n = 0
for key in rooms[data["room"]].keys():
if key != "participants":
if rooms[data["room"]][key]["team"] == data["team"] and rooms[data["room"]][key]["role"] == "in_chair":
n = len(rooms[data["room"]][key]["blocks"])
break
rooms[data["room"]][pid]["hack"] = n
message = {"action": "hack", "hack": n}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
async def hacked(data):
pid = data["pID"]
for key in rooms[data["room"]].keys():
if key != "participants":
if rooms[data["room"]][key]["team"] == data["team"] and rooms[data["room"]][key]["role"] == "in_chair":
rooms[data["room"]][key]["blocks"] = rooms[data["room"]][key]["team"][:data[hack]]
message = {"action": "hacked", "hack": rooms[data["room"]][key]["blocks"]}
message = json.dumps(message)
await all_user[key]["ws"].send(message)
break
message = {"action": "hacked", "hack": -1}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
async def sell(data):
pid = data["pID"]
rooms[data["room"]][pid]["cur"] = 100*data["bit_size"]
rooms[data["room"]][pid]["bits"].pop(data["bit_size"])
message = {"action": sell, "cur": rooms[data["room"]][pid]["cur"], "bits": rooms[data["room"]][pid]["bits"]}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
async def bot(data):
pid = data["pID"]
if rooms[data["room"]][pid]["cur"] >= 50:
rooms[data["room"]][pid]["cur"] -= 50
message = {"action": "bot_charge", "cur": rooms[data["room"]][pid]["cur"]}
message = json.dumps(message)
await all_user[pid]['ws'].send(message)
if data["team"] == "team1":
x = 0
y = 0
users = []
message = {"action": "bot", "x": x, "y": y}
message = json.dumps(message)
for user in rooms[data["room"]].keys():
if user != "participants":
users.append(user)
await asyncio.wait([all_user[user]['ws'].send(message) for user in all_user if user in users])
else:
x = 10
y = 10
users = []
message = {"action": "bot", "x": x, "y": y}
message = json.dumps(message)
for user in rooms[data["room"]].keys():
if user != "participants":
users.append(user)
await asyncio.wait([all_user[user]['ws'].send(message) for user in all_user if user in users])
async def backup(data):
pid = data["pID"]
cost = data["no"]*100
if rooms[data["room"]][pid]["cur"] >= cost:
rooms[data["room"]][pid]["cur"] -= cost
rooms[data["room"]][pid]["backup"] += data["no"]
message = {"action": "backup", "backup": rooms[data["room"]][pid]["backup"], "cur": rooms[data["room"]][pid]["cur"]}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
async def recall(data):
pid = data["pID"]
if rooms[data["room"]][pid]["backup"] != 0:
rooms[data["room"]][pid]["blocks"] = [rooms[data["room"]][pid]["backup"]]
message = {"action": "recall", "blocks": rooms[data["room"]][pid]["blocks"]}
message = json.dumps(message)
await all_user[pid]['ws'].send(message)
async def change(data):
id_1 = data["pID"]
id_2 = data["tID"]
temp = rooms[data["room"]][id_1]
rooms[data["room"]][id_1] = rooms[data["room"]][id_2]
rooms[data["room"]][id_2] = temp
users = []
message = {"action": "change", "in_chair": id_2, "field_guy": id_1}
message = json.dumps(message)
for user in rooms[data["room"]].keys():
if user != "participants":
users.append(user)
await asyncio.wait([all_user[user]['ws'].send(message) for user in all_user if user in users])
async def fake(data):
vals = [5, 7, 9, 11, 13, 15]
    pillars = [0, 1]  # indices into the x/y pillar position lists below
x = [0, 10]
y = [0, 10]
bit = random.choice(vals)
pillar = random.choice(pillars)
alpha_bit = 2*math.pi*random.random()
    # uniform point inside a disk of radius `bit`, centred on the chosen pillar
    r_bit = bit * math.sqrt(random.random())
    x_bit = r_bit * math.cos(alpha_bit) + x[pillar]
    y_bit = r_bit * math.sin(alpha_bit) + y[pillar]
message = {"action": "drop", "x_bit": x_bit, "y_bit": y_bit, "bit_size": bit, "fake": data["team"]}
    message = json.dumps(message)
for user in rooms[data["room"]].keys():
if user != "participants":
await all_user[user]['ws'].send(message)
async def collect(data):
pid = data['pID']
if data["type"] == "bits":
if rooms[data["room"]][pid]["bits"] < 1:
rooms[data["room"]][pid]["bits"] += 1
rooms[data["room"]][pid]["bit_size"] = data["size"]
message = "a"
if "fake" in data.keys():
message = {"bits": 1, "bit_size": data["size"], "fake": data["fake"]}
message = json.dumps(message)
else:
message = {"bits": 1, "bit_size": data["size"]}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
            # dump into a separate variable so data stays a dict for the "cur" check below
            payload = json.dumps(data)
            if len(all_user) > 1:  # asyncio.wait doesn't accept an empty list
                await asyncio.wait([all_user[user]['ws'].send(payload) for user in all_user if user != pid])
if data["type"] == "cur":
if rooms[data["room"]][pid]["cur"] < 5:
rooms[data["room"]][pid]["cur"] += 1
message = {"cur": rooms[data["room"]][pid]["cur"]}
message = json.dumps(message)
await all_user[pid]["ws"].send(message)
            payload = json.dumps(data)
            if len(all_user) > 1:  # asyncio.wait doesn't accept an empty list
                await asyncio.wait([all_user[user]['ws'].send(payload) for user in all_user if user != pid])
async def pingpong():
while True:
        # iterate over a snapshot so entries can be popped safely while looping
        for user in list(all_user):
            if all_user[user]['ws'].closed:
                for room in rooms:
                    if user in rooms[room]:
                        for u in rooms[room]:
                            if u != "participants" and u != user:
                                message = {"action": "removed", "user": user}
                                message = json.dumps(message)
                                await all_user[u]['ws'].send(message)
                        break
                all_user.pop(user)
        await asyncio.sleep(1)
async def unregister(websocket):
    # all_user maps pID -> {"ws": ..., ...}; drop every entry bound to this socket
    for pid in [u for u in all_user if all_user[u]['ws'] == websocket]:
        all_user.pop(pid)
async def coins():
global rooms
vals = [5, 7, 9, 11, 13, 15]
pillars = [1, 2]
x = [-128, 10]
y = [447, 10]
while True:
for room in rooms:
if rooms[room]['participants'] == 0:
continue
bit = random.choice(vals)
pillar = random.choice(pillars)
alpha_bit = 2*math.pi*random.random()
x_bit = bit * (1+math.cos(alpha_bit)) / 2
y_bit = bit * (1+math.sin(alpha_bit)) / 2
x_cur = []
y_cur = []
for i in range(5):
alpha_bit = 2*math.pi*random.random()
x_cur.append(bit * (1+math.cos(alpha_bit)) / 2)
y_cur.append(bit * (1+math.sin(alpha_bit)) / 2)
message = {"action": "drop", "bit_size": bit, "x_bit": x_bit, "y_bit": y_bit, "x_cur1": x_cur[0], "y_cur1": y_cur[0], "x_cur2": x_cur[1], "y_cur2": y_cur[1], "x_cur3": x_cur[2], "y_cur3": y_cur[2], "x_cur4": x_cur[3], "y_cur4": y_cur[3], "x_cur5": x_cur[4], "y_cur5": y_cur[4]}
message = json.dumps(message)
for key in rooms[room].keys():
if key != "participants":
await rooms[room][key]['ws'].send(message)
await asyncio.sleep(10)
def coins_callback():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(coins())
loop.close()
def ping_callback():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(pingpong())
loop.close()
async def counter(websocket, path):
pID = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
all_user[pID]= {"ws":websocket,"x":"0","y":"0"}
data = {"action": "sent_pID", "pID": pID}
data = json.dumps(data)
await websocket.send(data)
#all_user[data[pID]]= {"ws":websocket,"x":"0","y":"0"}
try:
async for message in websocket:
data = json.loads(message.decode('UTF-8'))
if data["action"] == "play":
await register(websocket, message)
elif data["action"] == "move":
await move_state(data)
elif data["action"] == "collect" and rooms[data["room"]][pID]["role"] == "field_player":
await collect(data)
elif data["action"] == "eject" and rooms[data["room"]][pID]["role"] == "field_player":
await eject(data)
elif data["action"] == "create" and rooms[data["room"]][pID]["role"] == "field_player":
await create(data)
elif data["action"] == "hack" and rooms[data["room"]][pID]["role"] == "field_player":
await hack(data)
elif data["action"] == "hacked" and rooms[data["room"]][pID]["role"] == "field_player":
await hacked(data)
elif data["action"] == "sell" and rooms[data["room"]][pID]["role"] == "in_chair":
await sell(data)
elif data["action"] == "bot" and rooms[data["room"]][pID]["role"] == "in_chair":
await bot(data)
elif data["action"] == "backup" and rooms[data["room"]][pID]["role"] == "in_chair":
await backup(data)
elif data["action"] == "recall" and rooms[data["room"]][pID]["role"] == "in_chair":
await recall(data)
elif data["action"] == "change" and rooms[data["room"]][pID]["role"] == "in_chair":
await change(data)
elif data["action"] == "fake" and rooms[data["room"]][pID]["role"] == "in_chair":
await fake(data)
elif data["action"] == "spot":
await respawn(data)
finally:
await unregister(websocket)
start_server = websockets.serve(counter, "localhost", 6789)
asyncio.get_event_loop().run_until_complete(start_server)
t = threading.Thread(target=coins_callback)
threads.append(t)
t.start()
k = threading.Thread(target=ping_callback)
threads.append(k)
k.start()
asyncio.get_event_loop().run_forever()
|
pipeline.py
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import time
from collections import OrderedDict
from itertools import chain, cycle
from threading import Thread
from .queue import AsyncQueue, Signal, StubQueue, VoidQueue, is_stop_signal
from .timer import TimerGroup, IncrementalTimer
class PipelineStep:
def __init__(self):
self.input_queue = None
self.output_queue = VoidQueue()
self.working = False
self.timers = TimerGroup()
self.total_time = IncrementalTimer()
self.own_time = IncrementalTimer()
self._start_t = None
self._thread = None
def process(self, item):
raise NotImplementedError
def end(self):
pass
def setup(self):
pass
def start(self):
if self.input_queue is None or self.output_queue is None:
raise Exception("No input or output queue")
if self._thread is not None:
raise Exception("Thread is already running")
self._thread = Thread(target=self._run)
self._thread.start()
self.working = True
def join(self):
self.input_queue.put(Signal.STOP)
self._thread.join()
self._thread = None
self.working = False
def _run(self):
self._start_t = time.time()
self.setup()
self.total_time = IncrementalTimer()
self.own_time = IncrementalTimer()
while True:
self.total_time.tick()
item = self.input_queue.get()
if self._check_output(item):
break
self.own_time.tick()
output = self.process(item)
self.own_time.tock()
if self._check_output(output):
break
self.total_time.tock()
self.input_queue.task_done()
self.output_queue.put(output)
self.input_queue.close()
self.end()
self.working = False
def _check_output(self, item):
if is_stop_signal(item):
self.output_queue.put(item)
return True
return False
class AsyncPipeline:
def __init__(self):
self.steps = OrderedDict()
self.sync_steps = OrderedDict()
self.async_step = []
self._void_queue = VoidQueue()
self._last_step = None
self._last_parallel = False
def add_step(self, name, new_pipeline_step, max_size=100, parallel=True):
new_pipeline_step.output_queue = self._void_queue
if self._last_step:
if parallel or self._last_parallel:
queue = AsyncQueue(maxsize=max_size)
else:
queue = StubQueue()
self._last_step.output_queue = queue
new_pipeline_step.input_queue = queue
else:
new_pipeline_step.input_queue = self._void_queue
if parallel:
self.steps[name] = new_pipeline_step
else:
self.sync_steps[name] = new_pipeline_step
self._last_step = new_pipeline_step
self._last_parallel = parallel
def run(self):
for step in self.steps.values():
if not step.working:
step.start()
self._run_sync_steps()
def close(self):
for step in self.steps.values():
step.input_queue.put(Signal.STOP_IMMEDIATELY)
for step in self.steps.values():
step.join()
def print_statistics(self):
log.info("Metrics report:")
for name, step in chain(self.sync_steps.items(), self.steps.items(), ):
log.info("\t{} total: {}".format(name, step.total_time))
log.info("\t{} own: {}".format(name, step.own_time))
def _run_sync_steps(self):
"""Run steps in main thread"""
if not self.sync_steps:
while not self._void_queue.finished:
pass
return
for step in self.sync_steps.values():
step.working = True
step.setup()
for step in cycle(self.sync_steps.values()):
step.total_time.tick()
item = step.input_queue.get()
if is_stop_signal(item):
step.input_queue.close()
step.output_queue.put(item)
break
step.own_time.tick()
output = step.process(item)
step.own_time.tock()
if is_stop_signal(output):
step.input_queue.close()
step.output_queue.put(output)
break
step.total_time.tock()
step.output_queue.put(output)
for step in self.sync_steps.values():
step.working = False
step.end()
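# Typical wiring of the classes above (a hypothetical sketch; the step names and
# classes are illustrative, not part of this module):
#
#     pipeline = AsyncPipeline()
#     pipeline.add_step('decode', DecodeStep(), parallel=True)    # own thread, AsyncQueue in between
#     pipeline.add_step('infer', InferStep(), parallel=True)
#     pipeline.add_step('render', RenderStep(), parallel=False)   # runs on the main thread
#     pipeline.run()               # starts the threaded steps, then loops the sync steps
#     pipeline.close()             # sends Signal.STOP_IMMEDIATELY and joins the threads
#     pipeline.print_statistics()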
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import google3
import numpy as np
from six.moves import xrange
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
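# A minimal sketch (not part of the original script) of how a record written by
# _convert_to_example() could be read back with the TF 1.x parsing ops; the
# feature spec below simply mirrors the keys used above.
def _parse_example_sketch(serialized_example):
  """Parse one serialized Example produced by _convert_to_example()."""
  features = {
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/class/label': tf.FixedLenFeature([], tf.int64),
      'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
      'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
  }
  parsed = tf.parse_single_example(serialized_example, features)
  # Decode the JPEG bytes back into an RGB tensor and return it with the label.
  image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
  return image, parsed['image/class/label']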
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique index of the batch to run, within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
  Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
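# Example invocation (directory paths are placeholders):
#   python build_imagenet_data.py --train_directory=/data/imagenet/train \
#     --validation_directory=/data/imagenet/validation \
#     --output_directory=/data/imagenet/tfrecords \
#     --labels_file=imagenet_lsvrc_2015_synsets.txt \
#     --imagenet_metadata_file=imagenet_metadata.txt \
#     --bounding_box_file=./imagenet_2012_bounding_boxes.csv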
|
__init__.py
|
"""
Package containing the CodaLab client tools.
"""
import json
import logging
import multiprocessing
import os
import yaml
import time
from Queue import Empty
class BaseConfig(object):
"""
Defines a base class for loading configuration values from a YAML-formatted file.
"""
def __init__(self, filename='.codalabconfig'):
self._filename = filename
paths_searched = [self._filename]
if not os.path.exists(self._filename):
self._filename = os.path.join(os.getcwd(), filename)
paths_searched.append(self._filename)
if not os.path.exists(self._filename):
self._filename = os.path.join(os.path.expanduser("~"), filename)
paths_searched.append(self._filename)
env_config_path = os.environ.get('CONFIG_PATH', '/home/azureuser')
if env_config_path and not os.path.exists(self._filename):
self._filename = os.path.join(env_config_path, filename)
paths_searched.append(self._filename)
if not os.path.exists(self._filename):
msg = "Config file not found. Searched for:\n" + "\n".join(paths_searched)
raise EnvironmentError(msg)
with open(self._filename, "r") as f:
self.info = yaml.load(f)
def getFilename(self):
"""Returns the full name of the configuration file."""
return self._filename
def getLoggerDictConfig(self):
"""Gets Dict config for logging configuration."""
return self.info['logging'] if 'logging' in self.info else None
class Queue(object):
"""
Provides an abstract definition for a queue providing one-way asynchronous messaging
between a publisher and a remote subscriber.
"""
def receive_message(self):
"""
Gets the next message from the queue.
Returns a valid QueueMessage instance or None if no message was received.
"""
raise NotImplementedError()
def send_message(self, body):
"""
Sends a message to the queue.
body: A string representing the body of the message.
"""
raise NotImplementedError()
class QueueMessage(object):
"""
Provides an abstract definition for a message exchanged through a queue.
"""
def get_body(self):
"""Gets a string representing the body of the message."""
raise NotImplementedError()
def get_queue(self):
"""Gets the Queue instance from which the message was retrieved."""
raise NotImplementedError()
class QueueMessageError(Exception):
"""Indicates that the body of a queue message cannot be decoded or is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
def decode_message_body(message):
"""
    Returns a dictionary instance constructed by decoding the JSON-encoded body
of the given message. The message is expected to decode to a dict containing
the following required key-value pairs:
key='id' -> tracking identifier
key='task_type' -> string defining the type of task expected from the consumer
Input arguments are usually passed with a third optional key-value pair:
key='task_args' -> object defining input arguments for the task
message: A QueueMessage instance.
"""
try:
body = message.get_body()
data = json.loads(body)
except:
raise QueueMessageError("JSON object could not be decoded.")
if not 'id' in data:
raise QueueMessageError("Missing key: id.")
if not 'task_type' in data:
raise QueueMessageError("Missing key: task_type.")
return data
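# Example of a body that decode_message_body() accepts (values are illustrative):
#
#   {"id": "42", "task_type": "run", "task_args": {"bundle_id": "7"}}
#
# A QueueMessage whose get_body() returns that JSON string yields a dict with the
# 'id', 'task_type' and optional 'task_args' keys consumed by BaseWorker below.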
class BaseWorker(object):
"""
Defines the base implementation for a worker process which listens to a queue for
messages. Each message defines a task. When the worker receives a message, it performs
the task then goes back to listening mode.
"""
def __init__(self, queue, vtable, logger):
"""
queue: The Queue object to listen to.
        vtable: A map from a task type to a function which constructs a runnable task. Given a
message with an identifier I, a task type T and task arguments A, the function
constructed to run the task is: F = vtable[T](I, A). And F() runs the task.
logger: The logging.Logger object to use.
"""
self.queue = queue
self.logger = logger
self.vtable = vtable
def _message_receive_listen(self, queue):
while True:
try:
self.logger.debug("Waiting for message.")
queue.put('waiting for message')
msg = self.queue.receive_message()
if msg is not None:
queue.put('received message')
self.logger.debug("Received message: %s", msg.get_body())
data = decode_message_body(msg)
task_id = data['id']
task_type = data['task_type']
task_args = data['task_args'] if 'task_args' in data else None
if task_type in self.vtable:
self.logger.info("Running task: id=%s task_type=%s", task_id, task_type)
self.vtable[task_type](task_id, task_args)
self.logger.info("Task complete: id=%s task_type=%s", task_id, task_type)
else:
self.logger.warning("Unknown task_type=%s for task with id=%s", task_type, task_id)
# catch all non-"system exiting" exceptions
except Exception:
self.logger.exception("An error has occurred.")
time.sleep(3)
def start(self):
"""
Starts the worker loop on the current thread.
"""
self.logger.debug("BaseWorker entering worker loop.")
last_message = None
queue = multiprocessing.Queue(8)
worker = multiprocessing.Process(target=self._message_receive_listen, args=(queue,))
worker.start()
while True:
try:
result = queue.get(True, 120)
except Empty:
result = None
self.logger.debug("Process thread status result: %s" % result)
# We don't want to shut off submissions in process, so only terminate if we're waiting for a message
if last_message == 'waiting for message' and result is None:
self.logger.debug("Restarting worker thread")
worker.terminate()
worker = multiprocessing.Process(target=self._message_receive_listen, args=(queue,))
worker.start()
last_message = result
|
scapy_ping.py
|
#encoding: utf-8
import sys
import os
import ipaddress
import multiprocessing
from scapy.all import *
from scapy_ping_one import scapy_ping_one
def scapy_ping_scan(network):
    net = ipaddress.ip_network(network)  # sys.argv entries are already str in Python 3; no decoding needed
ip_processes = {}
for ip in net:
ip_addr = str(ip)
ping_one = multiprocessing.Process(target=scapy_ping_one, args=(ip_addr, ))
ping_one.start()
ip_processes[ip_addr] = ping_one
ip_list = []
    for ip, process in ip_processes.items():
        process.join()  # wait for the ping process to finish before reading its exit code
        if process.exitcode == 3:  # exit code 3 means the ping succeeded
            ip_list.append(ip)
        else:
            process.terminate()
    return sorted(ip_list)
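# Usage sketch (illustrative): scan a /24 and collect the hosts that answered.
#   active = scapy_ping_scan('192.168.1.0/24')
#   # -> e.g. ['192.168.1.1', '192.168.1.10'], sorted as strings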
if __name__ == '__main__':
import time
t1 = time.time()
active_ip = scapy_ping_scan(sys.argv[1])
print("活动的IP地址如下:")
for ip in active_ip:
print(ip)
t2 = time.time()
print(t2 - t1)
|
Hiwin_RT605_ArmCommand_Socket_20190627184954.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
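# Usage sketch of the switch/case recipe above (illustrative only):
#   for case in switch(socket_cmd.action):
#       if case(Taskcmd.Action_Type.PtoP):
#           ...   # handle point-to-point motion
#           break
#       if case():        # calling case() with no argument acts as the default branch
#           ...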
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive the pose data sent by the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive the arm-mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive the speed-mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the ROS server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- socket: transmit arm commands -----------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set the arm rapid/safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6 ## switch back to the initial mode state
print(data)
print("Socket:", Socket)
        Socket.send(data.encode('utf-8')) # send the command string to the arm over the socket
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports the arm state
        if str(feedback_str[2]) == '48':# F: the arm is ready to receive the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: the arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: the strategy has finished
            state_feedback.ArmState = 6
            print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':# returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returned 1: true
            state_feedback.SentFlag = 1
        ##--------------- socket: transmit arm commands end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start() # start the socket-client thread
    #time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
game.py
|
import pygame
import threading
import sys
from core.legacy_cube import Cube
import numpy as np
pygame.init()
window_name = '.'.join(sys.argv[0].split('.')[:-1])
pygame.display.set_caption(window_name if window_name != '' else 'pygame')
SCREEN = pygame.display.set_mode((600, 400))
done = False
clock = pygame.time.Clock()
FRAME_RATE = 60
FILL_CURRENT = (255, 255, 255)
def background(a):
if not isinstance(a, tuple):
a = (a, a, a)
SCREEN.fill(a)
def fill(a, b = None, c = None):
global FILL_CURRENT
if b is None:
FILL_CURRENT = (a, a, a)
else:
FILL_CURRENT = (a, b, c)
def rect(a, b, c, d):
pygame.draw.rect(SCREEN, FILL_CURRENT, pygame.Rect((a, b), (c, d)))
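# Example (illustrative): fill() sets the current colour and rect() then draws
# with it, Processing-style.
#   fill(255, 0, 0)       # red
#   rect(10, 20, 50, 50)  # 50x50 square with its top-left corner at (10, 20)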
#----------------------------------------------------------------------
faces = {}
sides = {"top" : [150, 50], "bottom" : [150, 250], "front" : [150, 150], "back" : [350, 150], "right" : [250, 150], "left" : [50, 150]}
for i in range(6):
faces[list(sides.keys())[i]] = i * np.ones((3, 3))
cube = Cube(faces)
colors = [(255, 255, 255), (255,255,0), (0,255,0), (0,0,255), (255,0,0), (255,128,0), (0, 0, 0)]
background(0)
def draw():
for side in list(sides.keys()):
i = 0
face = cube.getFace(side)
if side == "top":
face = np.rot90(face, 2)
if side == "bottom":
face = face[::-1]
if side == "front":
face = np.rot90(face, 1)
face = face[:, ::-1]
if side == "back":
face = np.rot90(face, -1)
if side == "right":
face = np.rot90(face, -1)
if side == "left":
face = np.rot90(face, 1)
face = face[:, ::-1]
for row in face:
j = 0
for color in row:
fill(*colors[int(color)])
rect(sides[side][0] + i * 33, sides[side][1] + j * 33, 33, 33)
j += 1
i += 1
#----------------------------------------------------------------------
def terminal():
global done
buff = ""
print("Commands to be in the format \"orientation, side\". \"exit\" to exit", end='')
while not done:
print(buff, end='')
command = input("\n>>> ")
if command == "exit":
done = True
else:
arguments = tuple(command.split(', '))
            try:
                buff = cube.rotate(*arguments)
            except Exception:
                done = True
try:
thread = threading.Thread(target=terminal)
thread.start()
except Exception:
print("Error: unable to start thread")
while not done:
draw()
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.display.flip()
clock.tick(FRAME_RATE)
|
TermEmulator.py
|
import socket
import paramiko
import base64
from binascii import hexlify
import getpass
import os
import select
import socket
import sys
import traceback
from paramiko.py3compat import u
import wx
class TermEmulator():
def __init__(self, output_call, auth_info):
self.system = self.sysDetect()
self.auth_info = auth_info
self.trans = None
self.chan = None
self.output_call = output_call
self.init_shell(auth_info)
def sysDetect(self):
system = "posix"
try:
import termios
import tty
except ImportError:
system = "windows"
return system
def sendToTerminalUI(self, data):
wx.CallAfter(self.output_call, data)
def sendChar(self, char):
try:
self.chan.send(char)
except EOFError:
# user hit ^Z or F6
pass
def agent_auth(self, transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
"""
agent = paramiko.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
return
for key in agent_keys:
print("Trying ssh-agent key %s" % hexlify(key.get_fingerprint()))
try:
transport.auth_publickey(username, key)
print("... success!")
return
except paramiko.SSHException:
print("... nope.")
def manual_auth(self, username, hostname):
default_auth = "p"
        auth = input(
"Auth by (p)assword, (r)sa key, or (d)ss key? [%s] " % default_auth
)
if len(auth) == 0:
auth = default_auth
if auth == "r":
default_path = os.path.join(os.environ["HOME"], ".ssh", "id_rsa")
path = raw_input("RSA key [%s]: " % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.RSAKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass("RSA key password: ")
key = paramiko.RSAKey.from_private_key_file(path, password)
self.trans.auth_publickey(username, key)
elif auth == "d":
default_path = os.path.join(os.environ["HOME"], ".ssh", "id_dsa")
path = raw_input("DSS key [%s]: " % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.DSSKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass("DSS key password: ")
key = paramiko.DSSKey.from_private_key_file(path, password)
self.trans.auth_publickey(username, key)
else:
pw = getpass.getpass("Password for %s@%s: " % (username, hostname))
self.trans.auth_password(username, pw)
def login_auth(self):
if self.auth_info.is_psw:
self.trans.auth_password(self.auth_info.username, self.auth_info.password)
def posix_shell(self, chan):
try:
import termios
import tty
except ImportError:
pass
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = u(chan.recv(1024))
if len(x) == 0:
sys.stdout.write("\r\n*** EOF\r\n")
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(self, chan):
import threading
def writeall(sock, output):
while True:
data = sock.recv(256)
if not data:
output("\r\n*** EOF ***\r\n\r\n")
break
output(data)
writer = threading.Thread(target=writeall, args=(chan, self.sendToTerminalUI))
writer.start()
def init_shell(self, auth_info):
paramiko.util.log_to_file("demo.log")
if auth_info.hostname.find("@") >= 0:
auth_info.username, auth_info.hostname = auth_info.hostname.split("@")
if len(auth_info.hostname) == 0:
self.output_call("*** Hostname required.")
sys.exit(1)
if auth_info.hostname.find(":") >= 0:
auth_info.hostname, auth_info.port = auth_info.hostname.split(":")
auth_info.port = int(auth_info.port)
# now connect
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((auth_info.hostname, auth_info.port))
except Exception as e:
self.output_call("*** Connect failed: " + str(e))
traceback.print_exc()
sys.exit(1)
try:
self.trans = paramiko.Transport(sock)
try:
self.trans.start_client()
except paramiko.SSHException:
self.output_call("*** SSH negotiation failed.")
sys.exit(1)
try:
keys = paramiko.util.load_host_keys(
os.path.expanduser("~/.ssh/known_hosts")
)
except IOError:
try:
keys = paramiko.util.load_host_keys(
os.path.expanduser("~/ssh/known_hosts")
)
except IOError:
self.output_call("*** Unable to open host keys file")
keys = {}
# check server's host key -- this is important.
key = self.trans.get_remote_server_key()
if auth_info.hostname not in keys:
self.output_call("*** WARNING: Unknown host key!")
elif key.get_name() not in keys[auth_info.hostname]:
self.output_call("*** WARNING: Unknown host key!")
elif keys[auth_info.hostname][key.get_name()] != key:
self.output_call("*** WARNING: Host key has changed!!!")
sys.exit(1)
else:
self.output_call("*** Host key OK.")
# get username
if auth_info.username == "":
default_username = getpass.getuser()
self.output_call("Username [%s]: " % default_username)
if len(auth_info.username) == 0:
auth_info.username = default_username
self.agent_auth(self.trans, auth_info.username)
if not self.trans.is_authenticated():
# self.manual_auth(auth_info.username, auth_info.hostname)
self.login_auth()
if not self.trans.is_authenticated():
print("*** Authentication failed. :(")
self.trans.close()
sys.exit(1)
self.chan = self.trans.open_session()
self.chan.get_pty()
self.chan.invoke_shell()
if self.system == "posix":
self.posix_shell(self.chan)
elif self.system == "windows":
self.windows_shell(self.chan)
except Exception as e:
print("*** Caught exception: " + str(e.__class__) + ": " + str(e))
traceback.print_exc()
try:
self.trans.close()
except:
pass
sys.exit(1)
def close(self):
self.chan.close()
self.trans.close()
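# Usage sketch (hypothetical auth_info container; the real object is created
# elsewhere in this project and must expose hostname, port, username,
# password and is_psw). Note that the constructor calls init_shell(), which
# connects and, on POSIX, hands the controlling terminal to the remote shell:
#   from collections import namedtuple
#   AuthInfo = namedtuple('AuthInfo', 'hostname port username password is_psw')
#   info = AuthInfo(hostname='example.com', port=22, username='user',
#                   password='secret', is_psw=True)
#   term = TermEmulator(output_call=print, auth_info=info)
#   term.close()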
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum.base_wizard import BaseWizard
from electrum.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<ChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<SeedDialogHeader@GridLayout>
text: ''
options_dialog: None
rows: 1
orientation: 'horizontal'
size_hint: 1, None
height: self.minimum_height
BigLabel:
size_hint: 9, None
text: root.text
IconButton:
id: options_button
height: '30dp'
width: '30dp'
size_hint: 1, None
icon: 'atlas://electrum/gui/kivy/theming/light/gear'
on_release:
root.options_dialog() if root.options_dialog else None
<RestoreSeedDialog>
message: ''
word: ''
SeedDialogHeader:
id: seed_dialog_header
text: 'ENTER YOUR SEED PHRASE'
options_dialog: root.options_dialog
GridLayout:
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
SeedDialogHeader:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
options_dialog: root.options_dialog
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '48dp'
SeedLabel:
text: root.warning
<ChoiceLineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
SeedLabel:
text: root.message2
TextInput:
id: text_input
multiline: False
size_hint: 1, None
height: '48dp'
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
self.auto_dismiss = False
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
self._trigger_size_dialog = Clock.create_trigger(self._size_dialog)
# note: everything bound here needs to be unbound as otherwise the
# objects will be kept around and keep receiving the callbacks
Window.bind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
self._trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_keyboard(self, instance, key, keycode, codepoint, modifier):
if key == 27:
if self.wizard.can_go_back():
self.wizard.go_back()
else:
app = App.get_running_app()
if not app.is_exit:
app.is_exit = True
app.show_info(_('Press again to exit'))
else:
self._on_release = False
self.dismiss()
return True
def on_dismiss(self):
Window.unbind(size=self._trigger_size_dialog,
rotation=self._trigger_size_dialog,
on_keyboard=self.on_keyboard)
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
        except ValueError:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
self.wizard.terminate(aborted=True)
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class ChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(ChoiceDialog, self).__init__(wizard, **kwargs)
self.title = kwargs.get('message', '')
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
self.init_choices(choices)
def init_choices(self, choices):
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message = kwargs.get('message', '')
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class CLButton(ToggleButton):
def on_release(self):
self.root.script_type = self.script_type
self.root.set_text(self.value)
class ChoiceLineDialog(ChoiceDialog):
title = StringProperty('')
message1 = StringProperty('')
message2 = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.title = kwargs.get('title', '')
self.message1 = kwargs.get('message1', '')
self.message2 = kwargs.get('message2', '')
self.choices = kwargs.get('choices', [])
default_choice_idx = kwargs.get('default_choice_idx', 0)
self.ids.next.disabled = False
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for idx, (script_type, title, text) in enumerate(self.choices):
b = CLButton(text=title, height='30dp', group=self.title, allow_no_selection=False)
b.script_type = script_type
b.root = self
b.value = text
layout.add_widget(b)
if idx == default_choice_idx:
b.trigger_action(duration=0)
def set_text(self, value):
self.ids.text_input.text = value
def get_params(self, b):
return (self.ids.text_input.text, self.script_type)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, _):
self.ext = ext
d = SeedOptionsDialog(self.ext, None, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum.mnemonic import Mnemonic
from electrum.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
self.bip39 = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, bip39):
self.ext = ext
self.bip39 = bip39
self.update_next_button()
d = SeedOptionsDialog(self.ext, self.bip39, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def update_next_button(self):
self.ids.next.disabled = False if self.bip39 else not bool(self._test(self.get_text()))
def on_text(self, dt):
self.update_next_button()
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), self.bip39, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def __init__(self, *args, **kwargs):
RestoreSeedDialog.__init__(self, *args, **kwargs)
self.ids.seed_dialog_header.ids.options_button.disabled = True
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
            except Exception:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard is done creating/ restoring
wallet/s.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, *, storage=None, aborted=False):
if storage is None and not aborted:
storage = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
ChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def choice_and_line_dialog(self, **kwargs): ChoiceLineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
ES.py
|
import numpy as np
import tensorflow as tf
from datetime import datetime
import time
import gym
import multiprocessing as mp
import scipy.stats as ss
import contextlib
@contextlib.contextmanager
def temp_seed(seed):
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
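# Example (illustrative): temp_seed gives reproducible draws without
# disturbing the global NumPy RNG state.
#   with temp_seed(0):
#       a = np.random.rand(3)   # identical values on every run
#   b = np.random.rand(3)       # continues from the previous global RNG state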
def mlp(x, hidden_layers, output_layer, activation=tf.tanh, last_activation=None):
'''
Multi-layer perceptron
'''
for l in hidden_layers:
x = tf.layers.dense(x, units=l, activation=activation)
return tf.layers.dense(x, units=output_layer, activation=last_activation)
def test_agent(env_test, agent_op, num_games=1):
'''
    Test an agent 'agent_op' for 'num_games' games.
    Return the list of episode rewards and the total number of steps.
'''
games_r = []
steps = 0
for _ in range(num_games):
d = False
game_r = 0
o = env_test.reset()
while not d:
a_s = agent_op(o)
o, r, d, _ = env_test.step(a_s)
game_r += r
steps += 1
games_r.append(game_r)
return games_r, steps
def worker(env_name, initial_seed, hidden_sizes, lr, std_noise, indiv_per_worker, worker_name, params_queue, output_queue):
env = gym.make(env_name)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
import tensorflow as tf
# set an initial seed common to all the workers
tf.random.set_random_seed(initial_seed)
np.random.seed(initial_seed)
with tf.device("/cpu:" + worker_name):
obs_ph = tf.placeholder(shape=(None, obs_dim), dtype=tf.float32, name='obs_ph')
new_weights_ph = tf.placeholder(shape=(None,), dtype=tf.float32, name='new_weights_ph')
def variables_in_scope(scope):
# get all trainable variables in 'scope'
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
with tf.variable_scope('nn_' + worker_name):
acts = mlp(obs_ph, hidden_sizes, act_dim, tf.tanh, last_activation=tf.tanh)
agent_variables = variables_in_scope('nn_' + worker_name)
agent_variables_flatten = flatten_list(agent_variables)
# Update the agent parameters with new weights new_weights_ph
it_v1 = tf.Variable(0, trainable=False)
update_weights = []
for a_v in agent_variables:
upd_rsh = tf.reshape(new_weights_ph[it_v1 : it_v1+tf.reduce_prod(a_v.shape)], shape=a_v.shape)
update_weights.append(a_v.assign(upd_rsh))
it_v1 += tf.reduce_prod(a_v.shape)
# Reshape the new_weights_ph following the neural network shape
it_v2 = tf.Variable(0, trainable=False)
vars_grads_list = []
for a_v in agent_variables:
vars_grads_list.append(tf.reshape(new_weights_ph[it_v2 : it_v2+tf.reduce_prod(a_v.shape)], shape=a_v.shape))
it_v2 += tf.reduce_prod(a_v.shape)
# Create the optimizer
opt = tf.train.AdamOptimizer(lr)
# Apply the "gradients" using Adam
apply_g = opt.apply_gradients([(g, v) for g, v in zip(vars_grads_list, agent_variables)])
def agent_op(o):
a = np.squeeze(sess.run(acts, feed_dict={obs_ph:[o]}))
return np.clip(a, env.action_space.low, env.action_space.high)
def evaluation_on_noise(noise):
'''
Evaluate the agent with the noise
'''
# Get the original weights that will be restored after the evaluation
original_weights = sess.run(agent_variables_flatten)
# Update the weights of the agent/individual by adding the extra noise noise*STD_NOISE
sess.run(update_weights, feed_dict={new_weights_ph:original_weights + noise*std_noise})
# Test the agent with the new weights
rewards, steps = test_agent(env, agent_op)
# Restore the original weights
sess.run(update_weights, feed_dict={new_weights_ph:original_weights})
return np.mean(rewards), steps
config_proto = tf.ConfigProto(device_count={'CPU': 4}, allow_soft_placement=True)
sess = tf.Session(config=config_proto)
sess.run(tf.global_variables_initializer())
agent_flatten_shape = sess.run(agent_variables_flatten).shape
while True:
for _ in range(indiv_per_worker):
seed = np.random.randint(1e7)
with temp_seed(seed):
# sample, for each weight of the agent, from a normal distribution
sampled_noise = np.random.normal(size=agent_flatten_shape)
# Mirrored sampling
pos_rew, stp1 = evaluation_on_noise(sampled_noise)
neg_rew, stp2 = evaluation_on_noise(-sampled_noise)
# Put the returns and seeds on the queue
# Note that here we are just sending the seed (a scalar value), not the complete perturbation sampled_noise
output_queue.put([[pos_rew, neg_rew], seed, stp1+stp2])
# Get all the returns and seed from each other worker
batch_return, batch_seed = params_queue.get()
batch_noise = []
for seed in batch_seed:
# reconstruct the perturbations from the seed
with temp_seed(seed):
sampled_noise = np.random.normal(size=agent_flatten_shape)
batch_noise.append(sampled_noise)
batch_noise.append(-sampled_noise)
            # Compute the stochastic gradient estimate
vars_grads = np.zeros(agent_flatten_shape)
for n, r in zip(batch_noise, batch_return):
vars_grads += n * r
vars_grads /= len(batch_noise) * std_noise
            # run Adam optimization on the estimated gradient just computed
sess.run(apply_g, feed_dict={new_weights_ph:-vars_grads})
def normalized_rank(rewards):
'''
Rank the rewards and normalize them.
'''
ranked = ss.rankdata(rewards)
norm = (ranked - 1) / (len(ranked) - 1)
norm -= 0.5
return norm
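# Worked example (illustrative): normalized_rank([10., -5., 3.])
#   rankdata -> [3, 1, 2]; (rank - 1) / (len - 1) -> [1.0, 0.0, 0.5];
#   subtracting 0.5 gives [0.5, -0.5, 0.0], i.e. returns mapped into [-0.5, 0.5].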
def flatten(tensor):
'''
Flatten a tensor
'''
return tf.reshape(tensor, shape=(-1,))
def flatten_list(tensor_list):
'''
Flatten a list of tensors
'''
return tf.concat([flatten(t) for t in tensor_list], axis=0)
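# A minimal NumPy-only sketch of the gradient estimate assembled inside
# worker() above; provided for clarity and not called anywhere in this file.
def es_gradient_estimate(batch_noise, batch_return, std_noise):
    '''
    Return-weighted average of the sampled perturbations, scaled by
    1/std_noise, matching the vars_grads computation in worker().
    '''
    grad = np.zeros_like(batch_noise[0])
    for noise, ret in zip(batch_noise, batch_return):
        grad += noise * ret
    return grad / (len(batch_noise) * std_noise)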
def ES(env_name, hidden_sizes=[8,8], number_iter=1000, num_workers=4, lr=0.01, indiv_per_worker=10, std_noise=0.01):
initial_seed = np.random.randint(1e7)
# Create a queue for the output values (single returns and seeds values)
output_queue = mp.Queue(maxsize=num_workers*indiv_per_worker)
    # Create a queue for the input parameters (batch returns and batch seeds)
params_queue = mp.Queue(maxsize=num_workers)
now = datetime.now()
clock_time = "{}_{}.{}.{}".format(now.day, now.hour, now.minute, now.second)
hyp_str = '-numworkers_'+str(num_workers)+'-lr_'+str(lr)
file_writer = tf.summary.FileWriter('log_dir/'+env_name+'/'+clock_time+'_'+hyp_str, tf.get_default_graph())
processes = []
# Create a parallel process for each worker
for widx in range(num_workers):
p = mp.Process(target=worker, args=(env_name, initial_seed, hidden_sizes, lr, std_noise, indiv_per_worker, str(widx), params_queue, output_queue))
p.start()
processes.append(p)
tot_steps = 0
# Iterate over all the training iterations
for n_iter in range(number_iter):
batch_seed = []
batch_return = []
# Wait until enough candidate individuals are evaluated
for _ in range(num_workers*indiv_per_worker):
p_rews, p_seed, p_steps = output_queue.get()
batch_seed.append(p_seed)
batch_return.extend(p_rews)
tot_steps += p_steps
print('Iter: {} Reward: {:.2f}'.format(n_iter, np.mean(batch_return)))
# Let's save the population's performance
summary = tf.Summary()
for r in batch_return:
summary.value.add(tag='performance', simple_value=r)
file_writer.add_summary(summary, tot_steps)
file_writer.flush()
# Rank and normalize the returns
batch_return = normalized_rank(batch_return)
# Put on the queue all the returns and seed so that each worker can optimize the neural network
for _ in range(num_workers):
params_queue.put([batch_return, batch_seed])
# terminate all workers
for p in processes:
p.terminate()
if __name__ == '__main__':
ES('LunarLanderContinuous-v2', hidden_sizes=[32,32], number_iter=200, num_workers=4, lr=0.02, indiv_per_worker=12, std_noise=0.05)
|
classify_train_mp_ring.py
|
import os
import time
import numpy as np
import numpy.random as rd
import torch
from Demo_deep_learning.yonv_utils import load_data_ary
from Demo_deep_learning.yonv_utils import load_torch_model
from Demo_deep_learning.yonv_utils import whether_remove_history
from torch.nn.utils import clip_grad_norm_
"""Github: Yonv1943
======================================================
dataset net para(MB) time(s) accuracy
----------|----------|----------|----------|----------
MNIST Conv2dNet 1.55 103 >0.99
MNIST SE2dNet 1.57 108 >0.99
Fashion Conv2dNet 1.55 103 >0.92
Fashion SE2dNet 1.57 108 >0.93
CIFAR10 Conv2dNet 2.07 103 >0.79
CIFAR10 SE2dNet 2.09 108 >0.80
======================================================
Without evaluate
Type UsedTime
train_and_evaluate() 100%
train_and_evaluate_mp() 106%
"""
class Arguments:
def __init__(self, ):
self.train_epochs = [max(int(8 * 0.5 ** (i / 2)), 1) for i in range(8)]
self.batch_sizes = [int(128 * 2 ** (i / 2)) for i in range(8)]
self.mid_dim = 2 ** 8
self.if_amp = False
self.if_one_hot = True
self.num_worker = 1
self.mod_dir = 'tutorial_cnn'
self.gpu_id = 0
self.show_gap = 2 ** 1
self.eval_gap = 2 ** 3
self.net_class = None
# self.data_path = '/mnt/sdb1/Yonv/datasets/Data/MNIST/MNIST.npz'
self.data_path = '/mnt/sdb1/Yonv/datasets/Data/FashionMNIST/FashionMNIST.npz'
self.img_shape = (28, 28, 1)
# self.data_path = '/mnt/sdb1/Yonv/datasets/Data/CIFAR10/CIFAR10.npz'
# self.img_shape = (32, 32, 3)
def init_before_training(self, if_main=True):
if if_main:
whether_remove_history(self.mod_dir, remove=True)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
np.random.seed(1943 + int(time.time()))
torch.manual_seed(1943 + rd.randint(0, int(time.time())))
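# Usage sketch (hypothetical; Conv2dNet is one of the networks listed in the
# table above and is defined elsewhere in this project, and data_path must
# point at an existing .npz file):
#   args = Arguments()
#   args.net_class = Conv2dNet
#   train_and_evaluate(args)              # single-process training
#   # or: train_and_evaluate_mp_ring(args) for the multi-process ring variant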
'''single processing'''
def train_and_evaluate(args):
net_class = args.net_class
data_path = args.data_path
img_shape = args.img_shape
if_amp = args.if_amp
if_one_hot = args.if_one_hot
mid_dim = args.mid_dim
mod_dir = args.mod_dir
gpu_id = args.gpu_id
train_epochs = args.train_epochs
batch_sizes = args.batch_sizes
show_gap = args.show_gap
eval_gap = args.eval_gap
del args
whether_remove_history(mod_dir, remove=True)
'''init env'''
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) # choose GPU:0
device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
np.random.seed(1943 + int(time.time()))
torch.manual_seed(1943 + rd.randint(0, int(time.time())))
'''load data'''
train_imgs, train_labs, eval__imgs, eval__labs = load_data_ary(data_path, img_shape, if_one_hot)
train_imgs = torch.as_tensor(train_imgs, dtype=torch.float32, device=device)
eval__imgs = torch.as_tensor(eval__imgs, dtype=torch.float32, device=device)
label_data_type = torch.float32 if if_one_hot else torch.long
train_labs = torch.as_tensor(train_labs, dtype=label_data_type, device=device)
eval__labs = torch.as_tensor(eval__labs, dtype=label_data_type, device=device)
del label_data_type
train_len = train_imgs.shape[0]
eval__len = eval__imgs.shape[0]
eval_size = min(2 ** 12, eval__len)
'''train model'''
model = net_class(mid_dim, img_shape).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
from torch.nn import functional
criterion = torch.nn.SmoothL1Loss() if if_one_hot else functional.nll_loss
amp_scale = torch.cuda.amp.GradScaler()
'''evaluator'''
evaluator = Evaluator(eval__imgs, eval__labs, eval_size, eval_gap, show_gap, criterion)
save_path = f'{mod_dir}/net.pth'
'''if_amp'''
def gradient_decent_original():
optimizer.zero_grad()
loss.backward()
clip_grad_norm_(model.parameters(), max_norm=3.0)
optimizer.step()
def gradient_decent_amp(): # automatic mixed precision
optimizer.zero_grad()
amp_scale.scale(loss).backward() # loss.backward()
amp_scale.unscale_(optimizer) # amp, clip_grad_norm_
clip_grad_norm_(model.parameters(), max_norm=3.0) # amp, clip_grad_norm_
amp_scale.step(optimizer) # optimizer.step()
amp_scale.update() # optimizer.step()
gradient_decent = gradient_decent_amp if if_amp else gradient_decent_original
print("Train Loop:")
learning_rates = [1e-3, ] * len(train_epochs) + [1e-4, ] * 2 + [1e-5, ] * 2
train_epochs.extend(train_epochs[-1:] * (len(learning_rates) - len(train_epochs)))
batch_sizes.extend(batch_sizes[-1:] * (len(learning_rates) - len(batch_sizes)))
for train_epoch, batch_size, learning_rate in zip(train_epochs, batch_sizes, learning_rates):
optimizer.param_groups[0]['lr'] = learning_rate
for epoch in range(train_epoch):
loss_sum = 0
model.train()
'''train_it'''
train_time = int(train_len / batch_size)
for i in range(train_time):
ids = rd.randint(train_len, size=batch_size)
inp = train_imgs[ids]
lab = train_labs[ids]
out = model(inp)
loss = criterion(torch.softmax(out, dim=1), lab)
gradient_decent()
loss_sum += loss.item()
loss_avg = loss_sum / train_time
evaluator.evaluate(model, batch_size, train_epoch, epoch, loss_avg)
evaluator.final_print()
torch.save(model.state_dict(), save_path)
file_size = os.path.getsize(save_path) / (2 ** 20) # Byte --> KB --> MB
print(f"\nSave: {mod_dir} | {file_size:.2f} MB")
'''multiple processing ring'''
def train_and_evaluate_mp_ring(args):
num_worker = args.num_worker
import multiprocessing as mp
process_list = list()
pipe_eva1, pipe_eva2 = mp.Pipe()
process_list.append(mp.Process(target=mp_evaluate, args=(args, pipe_eva1)))
pipe_net_l1 = list()
pipe_net_l2 = list()
for _ in range(num_worker):
pipe_net1, pipe_net2 = mp.Pipe()
pipe_net_l1.append(pipe_net1)
pipe_net_l2.append(pipe_net2)
queue_data_l = list()
for idx in range(num_worker):
queue_data = mp.Queue(8)
queue_data_l.append(queue_data)
process_list.extend([mp.Process(target=mp_train, args=(args, idx, queue_data,
pipe_eva2, pipe_net_l1, pipe_net_l2[idx])),
mp.Process(target=mp_data, args=(args, idx, queue_data))])
[p.start() for p in process_list]
process_list[0].join()
for pipe in [pipe_eva1, pipe_eva2] + pipe_net_l1 + pipe_net_l2:
while pipe.poll():
pipe.recv()
for queue in queue_data_l:
while queue.qsize() > 0:
queue.get()
[p.terminate() for p in process_list[1:]]
def mp_evaluate(args, pipe_eva1):
args.init_before_training(if_main=True)
net_class = args.net_class
data_path = args.data_path
img_shape = args.img_shape
if_one_hot = args.if_one_hot
num_worker = args.num_worker
mid_dim = args.mid_dim
mod_dir = args.mod_dir
train_epochs = args.train_epochs
batch_sizes = args.batch_sizes
show_gap = args.show_gap
eval_gap = args.eval_gap
del args
device = torch.device('cpu')
'''load data'''
train_imgs, train_labs, eval__imgs, eval__labs = load_data_ary(data_path, img_shape, if_one_hot)
# train_imgs = torch.as_tensor(train_imgs, dtype=torch.float32, device=device)
eval__imgs = torch.as_tensor(eval__imgs, dtype=torch.float32, device=device)
label_data_type = torch.float32 if if_one_hot else torch.long
# train_labs = torch.as_tensor(train_labs, dtype=label_data_type, device=device)
eval__labs = torch.as_tensor(eval__labs, dtype=label_data_type, device=device)
del label_data_type
# train_len = train_imgs.shape[0]
eval__len = eval__imgs.shape[0]
eval_size = min(2 ** 12, eval__len)
del train_imgs, train_labs
'''train model'''
model = net_class(mid_dim, img_shape).to(device)
model_cpu = model.to(torch.device("cpu")) # for pipe1_eva
[setattr(param, 'requires_grad', False) for param in model_cpu.parameters()]
del model
for _ in range(num_worker):
pipe_eva1.send(model_cpu.state_dict())
# model_cpu_state_dict = pipe_eva2.recv()
from torch.nn import functional
criterion = torch.nn.SmoothL1Loss() if if_one_hot else functional.nll_loss
'''init evaluate'''
evaluator = Evaluator(eval__imgs, eval__labs, eval_size, eval_gap, show_gap, criterion)
save_path = f'{mod_dir}/net.pth'
learning_rates = [1e-3, ] * len(train_epochs) + [1e-4, ] * 2 + [1e-5, ] * 2
train_epochs.extend(train_epochs[-1:] * (len(learning_rates) - len(train_epochs)))
batch_sizes.extend(batch_sizes[-1:] * (len(learning_rates) - len(batch_sizes)))
# pipe_eva2.send((idx, model_dict, batch_size, train_epoch, epoch, loss_avg))
# pipe_eva2.send('break')
pipe_receive = pipe_eva1.recv()
print("Train Loop:")
with torch.no_grad():
while True:
while pipe_eva1.poll():
# pipe_eva2.send((idx, model_dict, batch_size, train_epoch, epoch, loss_avg))
# pipe_eva2.send('break')
pipe_receive = pipe_eva1.recv()
if pipe_receive == 'break':
break
if pipe_receive == 'break':
break
idx, model_dict, batch_size, train_epoch, epoch, loss_avg = pipe_receive
model_cpu.load_state_dict(model_dict)
evaluator.evaluate(model_cpu, batch_size, train_epoch, epoch, loss_avg)
evaluator.final_print()
torch.save(model_cpu.state_dict(), save_path)
file_size = os.path.getsize(save_path) / (2 ** 20) # Byte --> KB --> MB
print(f"\nSave: {mod_dir} | {file_size:.2f} MB")
def mp_train(args, idx, queue_data, pipe_eva2, pipe_net_l1, pipe_net2):
args.init_before_training(if_main=False)
net_class = args.net_class
# data_path = args.data_path
img_shape = args.img_shape
if_amp = args.if_amp
if_one_hot = args.if_one_hot
num_worker = args.num_worker
mid_dim = args.mid_dim
train_epochs = args.train_epochs
batch_sizes = args.batch_sizes
del args
'''train model'''
device = torch.device(f"cuda:{idx}" if torch.cuda.is_available() else 'cpu')
model = net_class(mid_dim, img_shape).to(device)
# pipe_eva1.send(model_cpu.state_dict())
model_cpu_state_dict = pipe_eva2.recv()
model.load_state_dict(model_cpu_state_dict)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
from torch.nn import functional
criterion = torch.nn.SmoothL1Loss() if if_one_hot else functional.nll_loss
amp_scale = torch.cuda.amp.GradScaler()
# queue_data.put(train_len)
train_len = queue_data.get()
'''if_amp'''
    def gradient_descent_original():
optimizer.zero_grad()
loss.backward()
clip_grad_norm_(model.parameters(), max_norm=3.0)
optimizer.step()
    def gradient_descent_amp():  # automatic mixed precision
optimizer.zero_grad()
amp_scale.scale(loss).backward() # loss.backward()
amp_scale.unscale_(optimizer) # amp, clip_grad_norm_
clip_grad_norm_(model.parameters(), max_norm=3.0) # amp, clip_grad_norm_
amp_scale.step(optimizer) # optimizer.step()
amp_scale.update() # optimizer.step()
    gradient_descent = gradient_descent_amp if if_amp else gradient_descent_original
'''training loop'''
learning_rates = [1e-3, ] * len(train_epochs) + [1e-4, ] * 2 + [1e-5, ] * 2
train_epochs.extend(train_epochs[-1:] * (len(learning_rates) - len(train_epochs)))
batch_sizes.extend(batch_sizes[-1:] * (len(learning_rates) - len(batch_sizes)))
pipe_eva2_counter = idx
for train_epoch, batch_size, learning_rate in zip(train_epochs, batch_sizes, learning_rates):
optimizer.param_groups[0]['lr'] = learning_rate
for epoch in range(train_epoch):
loss_sum = 0
model.train()
'''train_it'''
train_time = int(train_len / batch_size)
for i in range(train_time):
# queue_data.put((inp, lab))
inp, lab = queue_data.get()
out = model(inp)
loss = criterion(torch.softmax(out, dim=1), lab)
                gradient_descent()
loss_sum += loss.item()
pipe_net_l1[(idx + 1) % num_worker].send(model)
current_model = pipe_net2.recv()
soft_update(model, current_model.to(device), tau=0.5)
loss_avg = loss_sum / train_time
pipe_eva2_counter = (pipe_eva2_counter + 1) % num_worker
if pipe_eva2_counter == 0:
model_cpu = model.state_dict()
pipe_eva2.send((idx, model_cpu, batch_size, train_epoch, epoch, loss_avg))
# pipe_receive = pipe_eva1.recv()
pipe_eva2.send('break')
# pipe_receive = pipe_eva1.recv()
while True:
time.sleep(4)
def mp_data(args, idx, queue_data):
args.init_before_training(if_main=False)
data_path = args.data_path
img_shape = args.img_shape
if_one_hot = args.if_one_hot
batch_sizes = args.batch_sizes
train_epochs = args.train_epochs
del args
device = torch.device(f"cuda:{idx}" if torch.cuda.is_available() else 'cpu')
'''load data'''
train_imgs, train_labs, eval__imgs, eval__labs = load_data_ary(data_path, img_shape, if_one_hot)
train_imgs = torch.as_tensor(train_imgs, dtype=torch.float32, device=device)
# eval__imgs = torch.as_tensor(eval__imgs, dtype=torch.float32, device=device)
label_data_type = torch.float32 if if_one_hot else torch.long
train_labs = torch.as_tensor(train_labs, dtype=label_data_type, device=device)
# eval__labs = torch.as_tensor(eval__labs, dtype=label_data_type, device=device)
del label_data_type
train_len = train_imgs.shape[0]
# eval__len = eval__imgs.shape[0]
# eval_size = min(2 ** 12, eval__len)
del eval__imgs, eval__labs
queue_data.put(train_len)
# train_len = queue_data.get()
'''training loop'''
learning_rates = [1e-3, ] * len(train_epochs) + [1e-4, ] * 2 + [1e-5, ] * 2
train_epochs.extend(train_epochs[-1:] * (len(learning_rates) - len(train_epochs)))
batch_sizes.extend(batch_sizes[-1:] * (len(learning_rates) - len(batch_sizes)))
for train_epoch, batch_size, learning_rate in zip(train_epochs, batch_sizes, learning_rates):
for epoch in range(train_epoch):
train_time = int(train_len / batch_size)
for i in range(train_time):
ids = rd.randint(train_len, size=batch_size)
inp = train_imgs[ids]
lab = train_labs[ids]
queue_data.put((inp, lab))
# inp, lab = queue_data.get()
while True:
time.sleep(4)
'''Utils'''
class Evaluator:
def __init__(self, eval__imgs, eval__labs, eval_size, eval_gap, show_gap, criterion):
self.show_gap = show_gap
self.eval__imgs = eval__imgs
self.eval__labs = eval__labs
self.eval__len = len(eval__labs)
self.eval_gap = eval_gap
self.eval_size = eval_size
self.criterion = criterion
self.start_time = time.time()
self.eval_time = self.show_time = 0
self.loss_avg = 0
self.loss_eva = 0
self.accuracy = 0
def evaluate(self, model, batch_size, train_epoch, epoch, loss_avg):
self.loss_avg = loss_avg
time0 = time.time()
if time0 - self.show_time > self.show_gap:
self.show_time = time.time()
print(f"|{batch_size:>4}/{train_epoch - epoch:>4} |loss {self.loss_avg:.4f}")
if time0 - self.eval_time > self.eval_gap:
'''evaluate_it'''
self.eval_time = time.time()
model.eval()
loss_sum_eva = 0
correct = 0
eval__time = int(self.eval__len / self.eval_size)
eval__time = eval__time + 1 if self.eval__len % self.eval_size else eval__time
with torch.no_grad():
for i in range(eval__time):
j = i * self.eval_size
inp = self.eval__imgs[j:j + self.eval_size]
lab = self.eval__labs[j:j + self.eval_size]
out = model(inp)
loss_sum_eva += self.criterion(
torch.softmax(out, dim=1), lab).item() * lab.shape[0]
predict = out.argmax(dim=1, keepdim=True)
int_lab = lab.argmax(dim=1, keepdim=True)
predict_bool = predict.eq(int_lab.view_as(predict))
correct += predict_bool.sum().item()
self.loss_eva = loss_sum_eva / self.eval__len
self.accuracy = correct / self.eval__len
print(f"|{batch_size:>4}/{train_epoch:>4} |loss {self.loss_avg:.4f} "
f"|EvaLoss {self.loss_eva:.4f} |Accu {self.accuracy:.4f}")
def final_print(self):
print(f"TimeUsed: {time.time() - self.start_time:4.0f}|loss {self.loss_avg:.4f} "
f"| EvaLoss {self.loss_eva:.4f} |Accu {self.accuracy:.4f}")
def soft_update(target_net, current_net, tau):
for tar, cur in zip(target_net.parameters(), current_net.parameters()):
        tar.data.copy_(cur.data * tau + tar.data * (1 - tau))
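# soft_update blends two parameter sets (a Polyak-style average):
#     theta_target <- tau * theta_current + (1 - tau) * theta_target
# Illustrative sketch (the model names are placeholders, not from this file):
#     soft_update(target_net=model_a, current_net=model_b, tau=0.5)
# With tau=0.5, as used in mp_train above, the two ring neighbours move halfway
# toward each other's parameters on every exchange.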
'''run'''
def run_test(args):
mod_dir = args.mod_dir
gpu_id = args.gpu_id
net_class = args.net_class
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) # choose GPU:0
device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
model = net_class().to(device)
load_torch_model(mod_dir, model)
in_put = np.zeros((1, 1, 28, 28), dtype=np.float32)
in_put = torch.tensor(in_put, dtype=torch.float32, device=device)
output = model(in_put)
output = output.cpu().data.numpy()[0]
print(np.argmax(output), output)
def run_main():
args = Arguments()
args.gpu_id = '0, 1, 2, 3'
args.if_amp = False
args.if_one_hot = True
args.num_worker = 4
# from Demo_deep_learning.classify_network import Res1dNet
# from Demo_deep_learning.classify_network import Conv2dNet
# from Demo_deep_learning.classify_network import SE2dNet
from Demo_deep_learning.classify_network import ConvNet
args.net_class = ConvNet
# args.train_epochs = [max(int(8 * 0.5 ** (i / 2)), 1) for i in range(8)]
args.train_epochs = [max(int(0 * 0.5 ** (i / 2)), 1) for i in range(8)]
args.batch_sizes = [int(128 * 2 ** (i / 2)) for i in range(8)]
args.data_path = '/mnt/sdb1/Yonv/datasets/Data/CIFAR10/CIFAR10.npz'
args.img_shape = (32, 32, 3)
# train_and_evaluate(args)
# train_and_evaluate_mp(args)
train_and_evaluate_mp_ring(args)
# run_test(args.mod_dir, args.gpu_id)
if __name__ == '__main__':
run_main()
|
28.py
|
from threading import Thread
from time import sleep, ctime
loops = [4, 2, 3]
class ThreadFunc(object):
def __init__(self, func, args, name=''):
self.name = name
self.func = func
self.args = args
def __call__(self):
        self.func(*self.args)  # apply() was removed in Python 3
def loop(nloop, nsec):
print('start loop', nloop, 'at:', ctime())
sleep(nsec)
def main():
print('starting at:', ctime())
threads = []
nloops = range(len(loops))
for i in nloops:
t = Thread(target=ThreadFunc(loop, (i, loops[i]), loop.__name__))
threads.append(t)
for i in nloops:
threads[i].start()
for i in nloops:
threads[i].join()
print('ALL Done at :', ctime())
if __name__ == '__main__':
main()
|
gnmi_sub_on_change.py
|
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from time import sleep
import grpc
from gnmi import gnmi_pb2
import google.protobuf.text_format
import signal
import sys
import threading
import queue
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--grpc-addr', help='P4Runtime gRPC server address',
type=str, action="store", default='localhost:9559')
args = parser.parse_args()
def main():
channel = grpc.insecure_channel(args.grpc_addr)
stub = gnmi_pb2.gNMIStub(channel)
stream_out_q = queue.Queue()
stream_in_q = queue.Queue()
def req_iterator():
while True:
req = stream_out_q.get()
if req is None:
break
print("***************************")
print("REQUEST")
print(req)
print("***************************")
yield req
def stream_recv(stream):
for response in stream:
print("***************************")
print("RESPONSE")
print(response)
print("***************************")
stream_in_q.put(response)
stream = stub.Subscribe(req_iterator())
stream_recv_thread = threading.Thread(
target=stream_recv, args=(stream,))
stream_recv_thread.start()
req = gnmi_pb2.SubscribeRequest()
subList = req.subscribe
subList.mode = gnmi_pb2.SubscriptionList.STREAM
subList.updates_only = True
sub = subList.subscription.add()
sub.mode = gnmi_pb2.ON_CHANGE
path = sub.path
for name in ["interfaces", "interface", "..."]:
e = path.elem.add()
e.name = name
stream_out_q.put(req)
try:
while True:
sleep(1)
except KeyboardInterrupt:
stream_out_q.put(None)
stream_recv_thread.join()
if __name__ == '__main__':
main()
|
tf_util_mod.py
|
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
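# Minimal usage sketch for switch(); kept as a comment so importing this module stays
# side-effect free, and the names cond/a/b are illustrative only:
#     cond = tf.placeholder(tf.bool, ())
#     a = tf.constant(1.0)
#     b = tf.constant(2.0)
#     out = switch(cond, a, b)  # yields 1.0 when cond is fed True, otherwise 2.0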
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
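# Piecewise behaviour with the default delta=1.0 (illustrative values):
#     |x| <  delta : 0.5 * x**2                  e.g. x = 0.5 -> 0.125
#     |x| >= delta : delta * (|x| - 0.5*delta)   e.g. x = 3.0 -> 2.5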
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize(sess=None, return_vars=False):
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
print("new variables being intialized", new_variables)
if sess is None:
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
else:
sess.run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
if return_vars:
return new_variables
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
                             max_outputs=10)  # tf.summary.image expects max_outputs, not max_images
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None, sess=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens, sess=sess)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
print('HERE AND NOT PASSING SESS')
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens, sess=None)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens, sess=None):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs}
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
self.sess = sess
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args, **kwargs):
feed_dict = {}
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
for inpt_name, value in kwargs.items():
self._feed_input(feed_dict, self.input_names[inpt_name], value)
if self.sess is None:
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
else:
results = self.sess.run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
print('Flat grad', loss, var_list)
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32, sess=None):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.sess = sess
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
if self.sess is None:
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
else:
self.sess.run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list, sess=None):
self.sess = sess
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
if self.sess is None:
return tf.get_default_session().run(self.op)
else:
return self.sess.run(self.op)
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus(session_config=None):
# based on recipe from https://stackoverflow.com/a/38580201
# Unless we allocate a session here, subsequent attempts to create one
# will ignore our custom config (in particular, allow_growth=True will have
# no effect).
if session_config is None:
session_config = get_session()._config
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices(session_config)
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None, var_list=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
#sess = sess if sess is not None else get_session()
if sess is not None:
saver = tf.train.Saver()
saver.restore(sess, fname)
else:
saver = tf.train.Saver()
print(fname,'fname')
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
import joblib
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
import joblib
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
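# Illustrative sketch (the placeholder shape is assumed, not taken from this file):
#     ph = tf.placeholder(tf.float32, [None, 4])
#     adjust_shape(ph, [1.0, 2.0, 3.0, 4.0])  # returns an ndarray reshaped to (1, 4)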
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
    return True  # shape check is short-circuited here; the detailed comparison below is never reached
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
query_pipe.py
|
# -*- coding:utf8 -*-
# File : query_pipe.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 3/19/17
#
# This file is part of TensorArtist.
from . import configs, utils
from ...core import get_logger
from ...core.utils.callback import CallbackManager
from ...core.utils.meta import notnone_property
import zmq
import threading
import queue
import contextlib
import collections
import pickle
import functools
# import msgpack
# import msgpack_numpy
# msgpack_numpy.patch()
# dumpb = functools.partial(msgpack.dumps, use_bin_type=True)
# loadb = msgpack.loads
dumpb = pickle.dumps
loadb = pickle.loads
logger = get_logger(__file__)
__all__ = ['QueryMessage', 'QueryRepPipe', 'QueryReqPipe']
QueryMessage = collections.namedtuple('QueryMessage', ['identifier', 'payload'])
class QueryRepPipe(object):
def __init__(self, name, send_qsize=0, mode='ipc'):
self._name = name
self._conn_info = None
self._context_lock = threading.Lock()
self._context = zmq.Context()
self._tosock = self._context.socket(zmq.ROUTER)
self._frsock = self._context.socket(zmq.PULL)
self._tosock.set_hwm(10)
self._frsock.set_hwm(10)
self._dispatcher = CallbackManager()
self._send_queue = queue.Queue(maxsize=send_qsize)
self._rcv_thread = None
self._snd_thread = None
self._mode = mode
assert mode in ('ipc', 'tcp')
@property
def dispatcher(self):
return self._dispatcher
@notnone_property
def conn_info(self):
return self._conn_info
def initialize(self):
self._conn_info = []
if self._mode == 'tcp':
port = self._frsock.bind_to_random_port('tcp://*')
self._conn_info.append('tcp://{}:{}'.format(utils.get_addr(), port))
port = self._tosock.bind_to_random_port('tcp://*')
self._conn_info.append('tcp://{}:{}'.format(utils.get_addr(), port))
elif self._mode == 'ipc':
self._conn_info.append(utils.bind_to_random_ipc(self._frsock, self._name + '-c2s-'))
self._conn_info.append(utils.bind_to_random_ipc(self._tosock, self._name + '-s2c-'))
self._rcv_thread = threading.Thread(target=self.mainloop_recv, daemon=True)
self._rcv_thread.start()
self._snd_thread = threading.Thread(target=self.mainloop_send, daemon=True)
self._snd_thread.start()
def finalize(self):
utils.graceful_close(self._tosock)
utils.graceful_close(self._frsock)
self._context.term()
@contextlib.contextmanager
def activate(self):
self.initialize()
try:
yield
finally:
self.finalize()
def mainloop_recv(self):
try:
while True:
if self._frsock.closed:
break
msg = loadb(self._frsock.recv(copy=False).bytes)
identifier, type, payload = msg
self._dispatcher.dispatch(type, self, identifier, payload)
except zmq.ContextTerminated:
pass
except zmq.ZMQError as e:
if self._tosock.closed:
logger.warn('Recv socket closed unexpectedly.')
else:
raise e
def mainloop_send(self):
try:
while True:
if self._tosock.closed:
break
job = self._send_queue.get()
self._tosock.send_multipart([job.identifier, dumpb(job.payload)], copy=False)
except zmq.ContextTerminated:
pass
except zmq.ZMQError as e:
if self._tosock.closed:
logger.warn('Send socket closed unexpectedly.')
else:
raise e
def send(self, identifier, msg):
self._send_queue.put(QueryMessage(identifier, msg))
class QueryReqPipe(object):
def __init__(self, name, conn_info):
self._name = name
self._conn_info = conn_info
self._context = None
self._tosock = None
self._frsock = None
@property
def identity(self):
return self._name.encode('utf-8')
def initialize(self):
self._context = zmq.Context()
self._tosock = self._context.socket(zmq.PUSH)
self._frsock = self._context.socket(zmq.DEALER)
self._tosock.setsockopt(zmq.IDENTITY, self.identity)
self._frsock.setsockopt(zmq.IDENTITY, self.identity)
self._tosock.set_hwm(2)
self._tosock.connect(self._conn_info[0])
self._frsock.connect(self._conn_info[1])
def finalize(self):
utils.graceful_close(self._frsock)
utils.graceful_close(self._tosock)
self._context.term()
@contextlib.contextmanager
def activate(self):
self.initialize()
try:
yield
finally:
self.finalize()
def query(self, type, inp, do_recv=True):
self._tosock.send(dumpb((self.identity, type, inp)), copy=False)
if do_recv:
out = loadb(self._frsock.recv(copy=False).bytes)
return out
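# Hedged usage sketch (assumes CallbackManager exposes a register(name, fn) method and that
# the callback signature matches the dispatch call in mainloop_recv; names and payloads are
# illustrative only):
#     rep = QueryRepPipe('demo')
#     rep.dispatcher.register('echo', lambda pipe, identifier, payload: pipe.send(identifier, payload))
#     with rep.activate():
#         req = QueryReqPipe('client-0', rep.conn_info)
#         with req.activate():
#             print(req.query('echo', {'x': 1}))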
|
main.py
|
import sys
import threading
from time import sleep
import cv2
import numpy as np
import process
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
from PyQt5.QtGui import QPixmap, QImage
from beepy.make_sound import beep
import pyttsx3
class Window(QMainWindow):
def __init__(self):
self.engine = pyttsx3.init()
super(Window, self).__init__()
loadUi('GUImain.ui', self)
with open("style.css", "r") as css:
self.setStyleSheet(css.read())
        self.face_detector, self.eye_detector, self.detector = process.init_cv()
self.startButton.clicked.connect(self.start_webcam)
self.stopButton.clicked.connect(self.stop_webcam)
self.camera_is_running = False
self.previous_right_keypoints = None
self.previous_left_keypoints = None
self.previous_right_blob_area = None
self.previous_left_blob_area = None
self.detect_eye_time_start = None
self.detect_eye_time_end = None
self.can_play = True
self.right_can_play = True
self.is_left_detected = False
self.is_right_detected = False
def start_webcam(self):
if not self.camera_is_running:
# self.capture = cv2.VideoCapture(cv2.CAP_DSHOW) # VideoCapture(0) sometimes drops error #-1072875772
self.capture = cv2.VideoCapture(0) # VideoCapture(0) sometimes drops error #-1072875772
if self.capture is None:
self.capture = cv2.VideoCapture(0)
self.camera_is_running = True
self.timer = QTimer(self)
self.timer.timeout.connect(self.update_frame)
self.timer.start(2)
def stop_webcam(self):
if self.camera_is_running:
self.capture.release()
self.timer.stop()
self.camera_is_running = not self.camera_is_running
def update_frame(self): # logic of the main loop
_, base_image = self.capture.read()
self.display_image(base_image)
processed_image = cv2.cvtColor(base_image, cv2.COLOR_RGB2GRAY)
face_frame, face_frame_gray, left_eye_estimated_position, right_eye_estimated_position, distance, _, _ = process.detect_face(
            base_image, processed_image, self.face_detector)
if face_frame is not None:
self.distanseText.setText(distance)
left_eye_frame, right_eye_frame, left_eye_frame_gray, right_eye_frame_gray = process.detect_eyes(face_frame,
face_frame_gray,
left_eye_estimated_position,
right_eye_estimated_position,
self.eye_detector)
if (left_eye_frame is None and right_eye_frame is None):
self.can_play = True
self.right_can_play = True
if right_eye_frame is not None:
if self.rightEyeCheckbox.isChecked():
right_eye_threshold = self.rightEyeThreshold.value()
right_keypoints, self.previous_right_keypoints, self.previous_right_blob_area = self.get_keypoints(
right_eye_frame, right_eye_frame_gray, right_eye_threshold,
previous_area=self.previous_right_blob_area,
previous_keypoint=self.previous_right_keypoints)
process.draw_blobs(right_eye_frame, right_keypoints)
if self.can_play:
t3 = threading.Thread(target=self.sound_play_async, kwargs={'notif':True})
t3.start()
sleep(.5)
self.is_left_detected = True
self.can_play = False
right_eye_frame = np.require(right_eye_frame, np.uint8, 'C')
self.display_image(right_eye_frame, window='right')
if left_eye_frame is not None:
if self.leftEyeCheckbox.isChecked():
left_eye_threshold = self.leftEyeThreshold.value()
left_keypoints, self.previous_left_keypoints, self.previous_left_blob_area = self.get_keypoints(
left_eye_frame, left_eye_frame_gray, left_eye_threshold,
previous_area=self.previous_left_blob_area,
previous_keypoint=self.previous_left_keypoints)
process.draw_blobs(left_eye_frame, left_keypoints)
if self.right_can_play:
t2 = threading.Thread(target=self.sound_play_async, kwargs={'notif':True},name= 'notif')
t2.start()
sleep(.5)
self.is_right_detected = True
self.right_can_play = False
left_eye_frame = np.require(left_eye_frame, np.uint8, 'C')
self.display_image(left_eye_frame, window='left')
msg = ''
if self.is_left_detected:
msg = 'left eye'
if self.is_right_detected:
msg = 'right eye'
if self.is_right_detected and self.is_left_detected:
msg = 'left and right eyes'
if msg:
msg = msg + ' detected'
t1 = threading.Thread(target=self.sound_play_async, kwargs={'dis': distance, 'msg': msg})
t1.start()
sleep(.5)
self.is_right_detected = False
self.is_left_detected = False
if self.pupilsCheckbox.isChecked(): # draws keypoints on pupils on main window
self.display_image(base_image)
def sound_play_async(self, msg=None, notif=None, sleep_time=0, say_distance=True, dis='0'):
if sleep_time == 14:
sleep_time = 15
sleep(sleep_time)
if not notif:
self.engine.say(msg if sleep_time == 0 else '{} in {} seconds'.format(msg, sleep_time))
self.engine.runAndWait()
if say_distance:
sleep(0.2)
self.engine.say('distance between webcam and you is about')
self.engine.say(dis)
self.engine.say('centimeters')
self.engine.runAndWait()
if sleep_time < 8:
self.sound_play_async(msg, notif, sleep_time + 7, say_distance=False)
else:
beep(sound=1)
def get_keypoints(self, frame, frame_gray, threshold, previous_keypoint, previous_area):
keypoints = process.process_eye(frame_gray, threshold, self.detector,
prevArea=previous_area)
if keypoints:
previous_keypoint = keypoints
previous_area = keypoints[0].size
else:
keypoints = previous_keypoint
return keypoints, previous_keypoint, previous_area
def display_image(self, img, window='main'):
# Makes OpenCV images displayable on PyQT, displays them
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4: # RGBA
qformat = QImage.Format_RGBA8888
else: # RGB
qformat = QImage.Format_RGB888
out_image = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat) # BGR to RGB
out_image = out_image.rgbSwapped()
if window == 'main': # main window
self.baseImage.setPixmap(QPixmap.fromImage(out_image))
self.baseImage.setScaledContents(True)
if window == 'left': # left eye window
self.leftEyeBox.setPixmap(QPixmap.fromImage(out_image))
self.leftEyeBox.setScaledContents(True)
if window == 'right': # right eye window
self.rightEyeBox.setPixmap(QPixmap.fromImage(out_image))
self.rightEyeBox.setScaledContents(True)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
window.setWindowTitle("Eye gaze Detection")
window.show()
sys.exit(app.exec_())
|
main.py
|
#! /usr/bin/python3
from flask import Flask
from flask import request
from flask import jsonify
from threading import Thread
import RPi.GPIO as GPIO
import pusherclient as PusherClient
from pusher import Pusher as PusherEvent
import ast
import os
import sys
import time
import pymysql.cursors
from datetime import datetime
import signal
##########################################################
# SETTING UP RASPBERRY PI PINS
# SETUP THE GPIO PINS TO BE USED
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
# SET PWM PIN syntax: GIOP.PWM(pinNo, frequency)
pwm = GPIO.PWM(11, 50)
# SETTING UP VARIABLES TO USE AS SERVO POSITIONS
# 2, 11 GIVES 180 DEGREE
# 2, 6 GIVES 90 DEGREE
servo_default_position = 2
servo_feeding_position = 6
# SET INITIAL POSITION OF THE SERVO TO DEFAULT
pwm.start(5)
pwm.ChangeDutyCycle(servo_default_position)
def device_feed():
# MOVE THE SERVO TO FEEDING POSITION
pwm.ChangeDutyCycle(servo_feeding_position)
time.sleep(0.5)
# MOVE THE SERVO TO DEFAULT POSITION
pwm.ChangeDutyCycle(servo_default_position)
##########################################################
##########################################################
# DATABASE INITIALIZATION AND SETUP
connection = pymysql.connect(
host="localhost",
user="root",
password="karkhana",
db='raspberry_petfeed',
cursorclass=pymysql.cursors.DictCursor
)
##########################################################
##########################################################
# DEFINING FLASK THREAD FUNCTION THAT WILL HOLD THE FLASK
def flask_server():
    # INITIALIZATION OF FLASK APP
app = Flask(__name__)
# ERROR RESPONSES
request_method_error = {
'connection': 'local',
'status': 'error',
'message': 'Error request type.'
}
@app.route('/', methods=['GET', 'POST'])
def index():
response = {
'connection': 'local',
'status': 'online'
}
if request.method == 'GET' or request.method == 'POST':
return jsonify(response)
# SETTING UP THE FEEDING ROUTE
@app.route('/feed', methods=['GET', 'POST'])
def feed():
if request.method == 'GET' or request.method == 'POST':
device_feed()
response = {
'connection': 'local',
'status': 'success',
'message': 'Feeding completed successfully.'
}
return jsonify(response)
else:
response = request_method_error
return jsonify(response)
# SETTING UP WIFI SETUP ROUTE
@app.route('/wifisetup', methods=['GET', 'POST'])
def wifiSetup():
# ERROR FLAG IS SET SO THAT WPA SUPPLICANT FILE ISN'T WRITTEN DURING ERROR
error_flag = False
ssid = ''
key = ''
if request.method == 'GET':
ssid = request.args.get('ssid')
key = request.args.get('key')
elif request.method == 'POST':
ssid = request.form['ssid']
key = request.form['key']
else:
response = request_method_error
return jsonify(response)
# CHECK IF SSID IS EMPTY OR NOT, IF EMPTY RETURN ERROR
if str(ssid) == 'None' or ssid == '':
response = {
'connection': 'local',
'status': 'error',
'message': 'SSID can\'t be empty.'
}
error_flag = True
return jsonify(response)
# CHECK IF KEY IS EMPTY OR NOT, IF EMPTY SET PASSWORD FLAG TRUE
if str(key) == 'None' or key == '':
password_flag = False
else:
password_flag = True
# IF NO ERROR OPEN THE WPA SUPPLICANT FILE AND ADD THE WIFI NETWORK
if error_flag is False:
# CHANGE DIRECTORY TO /etc/wpa_supplicant WHERE THE SUPPLICANT FILE IS PLACED
os.chdir('/etc/wpa_supplicant')
wpa_file = open("wpa_supplicant.conf", 'a')
print(wpa_file)
# IF PASSWORD IS NONE key_mgmt IS SET TO NONE
if password_flag is True:
new_network = """
network={
ssid=\"%s\"
psk=\"%s\"
}
""" % (ssid, key)
else:
new_network = """
network={
ssid=\"%s\"
key_mgmt=none
}
""" % (ssid)
try:
wpa_file.write(new_network)
wpa_file.close()
response = {
'connection': 'local',
'status': 'success',
'message': 'WIFI set successfully. Please restart device.'
}
return jsonify(response)
except:
response = {
'connection': 'local',
'status': 'error',
'message': 'There was an error trying to add wifi.'
}
return jsonify(response)
@app.route('/delete/wifi')
def deleteWifi():
os.chdir('/etc/wpa_supplicant/')
# os.chdir('/var/petfeed/')
wpa_file = open("wpa_supplicant.conf", 'w')
default_wpa = """
ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
ap_scan=1
update_config=1
network={
ssid=\"PetFeed\"
psk=\"petfeed123\"
priority=1
}
"""
wpa_file.write(default_wpa)
wpa_file.close()
response = {
'connection': 'local',
'status': 'success',
'message': 'WIFI set to default.'
}
return jsonify(response)
@app.route('/set/user', methods=['GET', 'POST'])
def setupUser():
if request.method == 'GET':
email = request.args.get('email')
if email is None:
                return jsonify({
                    'status': 'error',
                    'message': 'email field is required'
                })
        elif request.method == 'POST':
            email = (request.form['email'])
            if email is None:
                return jsonify({
                    'status': 'error',
                    'message': 'email field is required'
                })
else:
response = request_method_error
return jsonify(response)
with connection.cursor() as cursor:
try:
query = "DROP FROM users"
cursor.execute(query)
query = "DROP FROM schedules"
cursor.execute(query)
query = "INSERT INTO users(email) VALUES('%s')"
cursor.execute(query, email)
connection.commit()
response = {
'connection': 'local',
'status': 'success',
'message': 'User registered to the device successfully.'
}
return jsonify(response)
except:
connection.rollback()
response = {
'connection': 'local',
'status': 'error',
'message': 'There was an error trying to register user to the device.'
}
return jsonify(response)
@app.route('/restart')
def restart():
os.system("sudo reboot")
@app.route('/shutdown')
def shutdown():
os.system("sudo poweroff")
app.run('0.0.0.0', 80)
##########################################################
##########################################################
# PUSHER SERVER THREAD FUNCTION
def pusher_server():
# SETTING THE CHANNEL AND EVENT TO TRIGGER EACH TIME
    event = 'App\\Events\\eventTrigger'
channel = 'petfeed'
# THE CALLBACK FUNCTION THAT WILL RUN AFTER EACH TRIGGER
def callback_function(data):
data = ast.literal_eval(data)
# IF THE KEY GET HAS STATUS RETURN THE STATUS OF DEVICE
if 'get' in data.keys():
with connection.cursor() as cursor:
query = "SELECT DISTINCT id, email FROM users WHERE email=%s"
try:
cursor.execute(query, data['user'])
user = cursor.fetchone()
except:
user = None
if user is not None:
if data['get'] == 'status':
pusherEvent.trigger(channel, event, {
'connection': 'global',
'user': user['email'],
'message': 'Device is running perfectly fine.',
'status': 'online'
})
elif data['get'] == 'schedule':
with connection.cursor() as cursor:
try:
query = "SELECT * FROM schedules WHERE user_id=%s"
cursor.execute(query, user['id'])
schedules_result = cursor.fetchall()
schedules = []
for s in schedules_result:
scheduled_day = s['day']
scheduled_time = datetime.strftime(s['time'], "%H:%M")
schedules.append({
"day": scheduled_day,
"time": scheduled_time
})
pusherEvent.trigger(channel, event, {
'connection': 'global',
'user': user['email'],
'data': schedules,
'status': 'Success'
})
except:
schedules = []
pusherEvent.trigger(channel, event, {
'connection': 'global',
'user': user['email'],
'data': schedules,
'status': 'error',
'message': 'Could not find schedules for specified user. (Schedules not set yet)'
})
elif data['get'] == 'restart':
pusherEvent.trigger(channel, event, {
'connection': 'global',
'user': user['email'],
'status': 'restarting',
'message': 'Restarting your device.'
})
                    os.system('sudo reboot')
elif data['get'] == 'shutdown':
pusherEvent.trigger(channel, event, {
'connection': 'global',
'user': user['email'],
'status': 'shuttingdown',
'message': 'Shutting down your device.'
})
                    os.system('sudo poweroff')
else:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'invalid get'
})
else:
pusherEvent.trigger(channel, event, {
'status': 'error',
'message': 'No device bound to the specified user.'
})
# IF THE KEY FEED HAS THE VALUE FEED, FEED THE PET AND RETURN THE STATUS
elif 'feed' in data.keys():
try:
user = data['user']
with connection.cursor() as cursor:
query = "SELECT DISTINCT id, email FROM users WHERE email=%s"
cursor.execute(query, user)
user_result = cursor.fetchone()
if user_result is None:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Specified user not registered to the device.'
})
else:
if data['feed'] == 'treat':
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'online',
'user': user_result['email'],
'message': 'Feeding your pet, please wait.'
})
device_feed()
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'online',
'user': user_result['email'],
'message': 'Feeding completed successfully.'
})
else:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'invalid value for feed:[]'
})
except:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'The user field isn\'t set'
})
elif 'set' in data.keys():
if data['set'] == 'schedule':
try:
user = data['user']
with connection.cursor() as cursor:
query = "SELECT DISTINCT id, email FROM users WHERE email=%s"
cursor.execute(query, user)
user_result = cursor.fetchone()
if user_result is None:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Specified user not registered to the device.'
})
else:
if data['data'] is not None:
for schedule in data['data']:
day = schedule['day']
feed_time = schedule['time']
feed_time = datetime.strptime(feed_time, "%H:%M")
sql = "INSERT INTO schedules (day, time, user_id) VALUES (%s, %s, %s)"
cursor.execute(sql, (day, feed_time, user_result['id']))
connection.commit()
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'success',
'message': 'Your schedule was added successfully.'
})
else:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Empty data recieved.'
})
except:
                    connection.rollback()
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Internal error occurred while adding schedule'
})
            elif data['set'] == 'update':
with connection.cursor() as cursor:
try:
user = data['user']
query = "SELECT DISTINCT id, email FROM users WHERE email=%s"
cursor.execute(query, user)
user_result = cursor.fetchone()
if user_result is None:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Specified user not registered to the device.'
})
else:
query = "DELETE FROM schedules"
cursor.execute(query)
for schedule in data['data']:
day = schedule['day']
feed_time = schedule['time']
feed_time = datetime.strptime(feed_time, "%H:%M")
sql = "INSERT INTO schedules (day, time, user_id) VALUES (%s, %s, %s)"
cursor.execute(sql, (day, feed_time, user_result['id']))
connection.commit()
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'success',
'message': 'Your schedule was updated successfully.'
})
except:
connection.rollback()
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Internal error occurred while updating schedule'
})
def connect_handler(data):
petfeed_channel = pusherClient.subscribe(channel)
petfeed_channel.bind(event, callback_function)
pusherClient = PusherClient.Pusher(key='0053280ec440a78036bc', secret='7bbae18dfe3989d432a6')
pusherClient.connection.bind('pusher:connection_established', connect_handler)
pusherClient.connect()
pusherEvent = PusherEvent(app_id="440480", key="0053280ec440a78036bc", secret="7bbae18dfe3989d432a6",
cluster="mt1")
while True:
time.sleep(1)
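# Message shapes handled by callback_function above (reconstructed from its branches; the
# field values are examples only):
#     {'user': 'someone@example.com', 'get': 'status' | 'schedule' | 'restart' | 'shutdown'}
#     {'user': 'someone@example.com', 'feed': 'treat'}
#     {'user': 'someone@example.com', 'set': 'schedule', 'data': [{'day': 'Monday', 'time': '08:30'}, ...]}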
##########################################################
##########################################################
# THREAD THAT RUNS SCHEDULING TASKS
# NEED TO ADD LOGIC WHICH RUNS THE DEVICE ON SCHEDULE
def scheduled_task():
channel = "petfeed"
event = "Need to add one later"
pusherEvent = PusherEvent(app_id="440480", key="0053280ec440a78036bc",
secret="7bbae18dfe3989d432a6", cluster="mt1")
try:
#print("Inside scheduled task")
while 1:
today_day = datetime.now().strftime("%A")
today_time = datetime.now().strftime("%H:%M:%S")
print(today_day + ' ' + today_time)
with connection.cursor() as cursor:
query = "SELECT * FROM schedules WHERE day=%s AND time=%s"
cursor.execute(query, (today_day, today_time))
schedule = cursor.fetchone()
if schedule is not None:
scheduled_time = today_time
#print("inside found schedule")
#break
# CALL THE DEVICE FEED FUNCTION THAT CONTROLS THE PI SERVO
device_feed()
user_id = schedule['user_id']
query = "SELECT DISTINCT email, id FROM users WHERE id = %s"
cursor.execute(query, user_id)
user = cursor.fetchone()
pusherEvent.trigger(channel, event, {
'user': user['email'],
'status': 'success',
'data': {
'feeding_date': scheduled_time,
'user': user['email'],
}
})
            time.sleep(1)
except:
pusherEvent.trigger(channel, event, {
'connection': 'global',
'status': 'error',
'message': 'Internal error occurred while reading schedule'
})
##########################################################
##########################################################
# MAIN SCRIPT RUNS HERE
if __name__ == '__main__':
flask_thread = Thread(target=flask_server)
flask_thread.start()
pusher_thread = Thread(target=pusher_server)
pusher_thread.start()
scheduled_thread = Thread(target=scheduled_task)
scheduled_thread.start()
scheduled_thread.join()
pusher_thread.join()
flask_thread.join()
    # pass a handler (not the result of a call) so the GPIO/PWM resources are released on SIGTERM
    signal.signal(signal.SIGTERM, lambda signum, frame: (pwm.stop(), GPIO.cleanup()))
##########################################################
|
pulsar-test.py
|
#!/usr/bin/env python
import pulsar
import sys
import time
import subprocess
from datetime import datetime
import threading
from collections import defaultdict
import re
from gather_info_functions import *
def log(text, to_file=False):
global output_file
time_now = datetime.now().strftime('%H:%M:%S')
print(text)
if to_file:
output_file.write(f"{time_now}: {text}\n")
def write_duplicate(line):
global dupl_file
dupl_file.write(line + '\n')
def write_out_of_order(line):
global out_of_order_file
out_of_order_file.write(line + '\n')
def create_cluster(e, qw, qa, brokers, bookies, deduplication_enabled):
subprocess.call(["./setup-test-run.sh", e, qw, qa, brokers, bookies, deduplication_enabled])
def get_entry(msg_id):
    id = str(msg_id).replace("(", "").replace(")", "").split(",")
first = int(id[0])
entry = int(id[1])
return [first, entry]
def is_same_entry(last_entry, current_entry):
return last_entry[0] == current_entry[0] and last_entry[1] == current_entry[1]
def get_kill_count():
global chaos_action
# format is action[count]
m = re.search('.+\[(.+?)\]$', chaos_action)
if m:
return m.group(1)
else:
log("Could not identify kill count")
sys.exit(1)
def get_isolate_from_zk_partitions(target_node, topic):
partition1 = list()
partition2 = list()
for node in get_live_nodes():
if node != "zk1":
partition1.append(node)
for node in get_live_nodes():
if node != target_node:
partition2.append(node)
partition1_arg = ",".join(partition1)
partition2_arg = ",".join(partition2)
return [target_node, partition1_arg, partition2_arg]
def get_custom_partitions():
global chaos_action, partitions
# format is custom-isolation(node,node|node,node|..)
m = re.search('^custom-isolation\[(.+?)\]$', chaos_action)
if m:
p_text = m.group(1)
parts = p_text.split('|')
for i in range(0, len(parts)):
partitions.append(parts[i])
else:
log("Could not identify custom partitions in supplied argument")
sys.exit(1)
def execute_chaos_action(topic, chaos_action, partitions):
if chaos_action == "isolate-broker-from-zk" or chaos_action == "isolate-bookie-from-zk":
subprocess.call(["./execute-chaos.sh", chaos_action, topic, partitions[0], partitions[1], partitions[2]])
elif chaos_action.startswith("custom-isolation"):
if len(partitions) == 0:
log("No custom partition supplied")
sys.exit(1)
parts = " ".join(partitions)
subprocess.call(["./execute-chaos.sh", "custom-isolation", topic, parts])
elif chaos_action.startswith("kill-bookies"):
kill_count = get_kill_count()
subprocess.call(["./execute-chaos.sh", "kill-bookies", topic, kill_count])
else:
subprocess.call(["./execute-chaos.sh", chaos_action, topic])
def send_callback(res, msg):
global messages_pos_acked, messages_neg_acked, send_count, ack_count, pos_ack_count, neg_ack_count, action_mark, action_performed, chaos_action, topic, partitions
ack_count += 1
if str(res) == "Ok":
pos_ack_count += 1
value = int(msg.data())
messages_pos_acked.add(value)
else:
neg_ack_count += 1
value = int(msg.data())
messages_neg_acked.add(value)
if ack_count % 50000 == 0:
log(f"Send count: {str(send_count)} Ack count: {str(ack_count)} Pos: {str(pos_ack_count)} Neg: {str(neg_ack_count)}")
if ack_count > action_mark and action_performed == False:
action_performed = True
r = threading.Thread(target=execute_chaos_action,args=(topic, chaos_action, partitions))
r.start()
def produce(producer):
global send_count, ack_count, pos_ack_count, neg_ack_count, chaos_action, partitions
# send the first message synchronously, to ensure everything is running ok
producer.send(str(send_count).encode('utf-8'))
messages_pos_acked.add(send_count)
send_count += 1
ack_count += 1
pos_ack_count += 1
# perform slow to gather information now before fast message producing
if chaos_action == "isolate-broker-from-zk":
owner = get_owner_broker(topic)
partitions = get_isolate_from_zk_partitions(owner, topic)
elif chaos_action == "isolate-bookie-from-zk":
bookie = get_bookie_in_first_ledger()
partitions = get_isolate_from_zk_partitions(bookie, topic)
elif chaos_action.startswith("custom-isolation"):
get_custom_partitions()
# send bulk of messages asynchronously in order to achieve high message rate
while send_count < count-1:
if send_count - ack_count >= 10000: # ensure we don't have more than 10k in flight at a time
time.sleep(0.1)
else:
producer.send_async(str(send_count).encode('utf-8'), send_callback)
messages_sent[send_count] = list()
send_count += 1
# send last message in order to block until acked
# this way we ensure all messages are acked by the end of this function
producer.send(str(send_count).encode('utf-8'))
messages_pos_acked.add(send_count)
send_count += 1
ack_count += 1
pos_ack_count += 1
log(f"Send count: {str(send_count)} Ack count: {str(ack_count)} Pos: {str(pos_ack_count)} Neg: {str(neg_ack_count)}")
def read(reader):
global out_of_order, received_count, duplicate, messages_sent, test_run, topic
last_confirmed = get_last_confirmed_entry(topic)
log(f"Last confirmed entry: {last_confirmed}")
msg = reader.read_next()
msg_id = msg.message_id()
msg_entry = get_entry(msg_id)
current_payload = int(msg.data())
lastMsg = msg
received_count = 1
last_payload = current_payload
messages_sent[last_payload].append(msg_id)
reader_timeout = 10000
log(f"Start reading from {msg_id}")
while True:
try:
msg = reader.read_next(reader_timeout)
received_count += 1
msg_id = msg.message_id()
msg_entry = get_entry(msg_id)
# lower the wait time towards the end
if is_same_entry(last_confirmed, msg_entry):
reader_timeout = 10000
else:
reader_timeout = 60000
current_payload = int(msg.data())
messages_sent[current_payload].append(msg_id)
if received_count % 50000 == 0:
log(f"Received: {received_count} Curr Entry: {msg_entry}")
if last_payload >= current_payload:
line = f"{test_run}|{lastMsg.message_id()}|{str(last_payload)}|{msg_id}|{current_payload}"
if len(messages_sent[current_payload]) > 1:
duplicate += 1
write_duplicate(line)
else:
out_of_order += 1
write_out_of_order(line)
last_payload = current_payload
lastMsg = msg
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:{1!r}"
message = template.format(type(ex).__name__, ex.args)
if 'Pulsar error: TimeOut' in message:
break
else:
log(message)
log(f"Read phase complete with message {msg.message_id()}")
def show_help():
    with open("help", "r") as f:
        print(f.read())
chaos_action = sys.argv[1]
if chaos_action == "help":
show_help()
sys.exit(0)
topic_prefix = sys.argv[2]
test_num = int(sys.argv[3])
count = int(sys.argv[4])
action_mark = int(sys.argv[5])
bk_config = sys.argv[6].split('-')
ensemble_size = bk_config[0]
write_quorum = bk_config[1]
ack_quorum = bk_config[2]
node_counts = sys.argv[7].split('-')
brokers = node_counts[0]
bookies = node_counts[1]
deduplication_enabled = sys.argv[8]
test_run = 1
# create log files
start_time = datetime.now().strftime('%Y%m%d_%H:%M:%S')
output_file_w = open(f"test-output/{topic_prefix}_output.txt", "w")
output_file_w.write(f"{start_time} Start test\n")
output_file_w.close()
dupl_file_w = open(f"test-output/{topic_prefix}_duplicates.txt", "w")
dupl_file_w.write("run|last_msg_id|last_value|curr_msg_id|curr_value\n")
dupl_file_w.close()
out_of_order_file_w = open(f"test-output/{topic_prefix}_out_of_order.txt", "w")
out_of_order_file_w.write("run|last_msg_id|last_value|curr_msg_id|curr_value\n")
out_of_order_file_w.close()
# re-open the files in append mode for the logging helpers defined above
output_file = open(f"test-output/{topic_prefix}_output.txt", "a")
out_of_order_file = open(f"test-output/{topic_prefix}_out_of_order.txt", "a")
dupl_file = open(f"test-output/{topic_prefix}_duplicates.txt", "a")
while test_run <= test_num:
# prepare cluster phase ---------------
topic = topic_prefix + "_" + str(test_run)
create_cluster(ensemble_size, write_quorum, ack_quorum, brokers, bookies, deduplication_enabled)
# run test
send_count = 0
ack_count = 0
pos_ack_count = 0
neg_ack_count = 0
action_performed = False
log(f"", True)
log(f"Test Run #{test_run} on topic {topic} ------------", True)
# - CHAOS VARIABLES
partitions = list()
# - WRITE PHASE --------------------
log("-------------------------------------------------")
log("WRITE PHASE")
log("-------------------------------------------------")
proxy_ip = get_proxy_ip()
messages_sent = defaultdict(list)
messages_pos_acked = set()
messages_neg_acked = set()
send_timeout = 30000
if deduplication_enabled == "true":
send_timeout = 0
client = pulsar.Client(f'pulsar://{proxy_ip}:6650')
producer = client.create_producer(f'persistent://vanlightly/cluster-1/ns1/{topic}',
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=10,
max_pending_messages=1000000, #avoid producer slowdown after broker fail-overs
send_timeout_millis=send_timeout,
properties={
"producer-name": "test-producer-name",
"producer-id": "test-producer-id"
})
try:
produce(producer)
except KeyboardInterrupt:
log("Producer cancelled")
sys.exit(1)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:{1!r}"
message = template.format(type(ex).__name__, ex.args)
log("The producer has failed!!!")
log(message)
sys.exit(1)
finally:
client.close()
# - READ PHASE --------------------
time.sleep(10)
log("-------------------------------------------------")
log("READ PHASE")
log("-------------------------------------------------")
received_count = 0
out_of_order = 0
duplicate = 0
message_id = pulsar.MessageId.earliest
conn_attempts = 1
while True:
proxy_ip = get_proxy_ip()
try:
client = pulsar.Client(f'pulsar://{proxy_ip}:6650')
reader = client.create_reader(f'persistent://vanlightly/cluster-1/ns1/{topic}', message_id)
break
except Exception:
if conn_attempts > 3:
log("Could not connect, aborting test run")
sys.exit(1)
else:
log("Failed to connect, will retry")
conn_attempts += 1
try:
read(reader)
except KeyboardInterrupt:
log("Reader cancelled")
sys.exit(1)
finally:
client.close()
not_received = 0
received_no_ack = 0
msgs_with_dups = 0
received = 0
for msg_val, msg_ids in messages_sent.items():
received += len(msg_ids)
if len(msg_ids) == 0 and msg_val in messages_pos_acked:
not_received += 1
elif len(msg_ids) == 1 and msg_val not in messages_pos_acked:
received_no_ack += 1
elif len(msg_ids) > 1:
msgs_with_dups += 1
log("Results --------------------------------------------", True)
log(f"Final send count: {str(send_count)}", True)
log(f"Final ack count: {str(ack_count)}", True)
log(f"Final positive ack count: {str(pos_ack_count)}", True)
log(f"Final negative ack count: {str(neg_ack_count)}", True)
log(f"Messages received: {str(received)}", True)
log(f"Acked messages missing: {str(not_received)}", True)
log(f"Non-acked messages received: {str(received_no_ack)}", True)
log(f"Out-of-order: {str(out_of_order)}", True)
log(f"Duplicates: {msgs_with_dups}", True)
log("----------------------------------------------------", True)
test_run += 1
|
app.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import cgi
import urlparse
import traceback
from threading import Thread
from SocketServer import ThreadingMixIn
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from django.utils.simplejson import JSONEncoder
from django.db.models.query import QuerySet
from rapidsms.apps.base import AppBase
from rapidsms.conf import settings
class App(AppBase):
"""
This App does nothing by itself. It exists only to serve other Apps,
by providing an easy (and standard) way for them to communicate
between their WebUI and RapidSMS App object.
When RapidSMS starts, this app starts an HTTPServer (port 8001 as
default, but configurable via settings.py) in a worker thread, and
watches for any incoming HTTP requests matching */app/method*. These
requests, along with their GET parameters and POST data, are passed
on to the named app.
Examples::
method URL app method args
====== === === ====== ====
GET /food/toast food ajax_GET_toast { }
POST /food/waffles food ajax_POST_waffles { }, { }
POST /food/eggs?x=1 food ajax_POST_eggs { "x": [1] }, { }
Any data that is returned by the handler method is JSON encoded, and
sent back to the WebUI in response. Since RapidSMS includes jQuery
with every view, this makes it very easy for apps to query their
running App object for state. See the _httptester_ for an example.
But wait! AJAX can't cross domains, so a request to port 8001 from
the WebUI won't work! This is handled by the WebUI bundled with this
app, that proxies all requests to /ajax/(.+) to the right place, on
the server side. I cannot conceive of a situation where this would
be a problem - but keep it in mind, and don't forget to prepend
"/ajax/" to your AJAX URLs.
"""
class Server(ThreadingMixIn, HTTPServer):
pass
class MyJsonEncoder(JSONEncoder):
def default(self, o):
# if this object has its own JSON serializer, use it
if hasattr(o, "__json__"):
return o.__json__()
elif type(o) == QuerySet:
return list(o)
# otherwise, revert to the usual behavior
return JSONEncoder.default(self, o)
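    # Illustrative sketch (hypothetical model): any object returned by a handler
    # can control its own serialization by defining __json__, which
    # MyJsonEncoder prefers over the default encoding:
    #
    #   class Report(models.Model):
    #       title = models.CharField(max_length=100)
    #       def __json__(self):
    #           return {"id": self.pk, "title": self.title}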
class RequestHandler(BaseHTTPRequestHandler):
def _find_app(self, name):
for app in self.server.app.router.apps:
if app.name == name:
return app
def _charset(self, str):
"""
Extract and return the charset argument from an HTTP
content-type header, or None if it was not found.
"""
x = str.split("charset=", 1)
return x[1] if(len(x) == 2) else None
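        # Worked example of the parsing above (values are illustrative):
        #   self._charset("multipart/form-data; charset=utf-8")  ->  "utf-8"
        #   self._charset("text/plain")                          ->  None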
# handle GET and POST with the same method
def do_GET(self): return self.process()
def do_POST(self): return self.process()
def process(self):
def response(code, output, json=True):
self.send_response(code)
mime_type = "application/json" if json else "text/plain"
self.send_header("content-type", mime_type)
self.end_headers()
if json:
json = App.MyJsonEncoder().encode(output)
self.wfile.write(json)
# otherwise, write the raw response. it doesn't make
# much sense to have error messages encoded as JSON.
else: self.wfile.write(output)
# HTTP2xx represents success
return (code>=200 and code <=299)
# should look something like:
# /alpha/bravo?charlie=delta
#
# this request will be parsed to the "bravo"
# method of the "alpha" app, with the params:
# { "charlie": ["delta"] }
#
# any other path format will return an http 404 error, for
# the time being. GET parameters are optional.
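            # Worked example (illustrative values): for the request path
            # "/food/eggs?x=1", urlparse gives url.path == "/food/eggs", so
            # path_parts == ["", "food", "eggs"], and cgi.parse_qs(url.query)
            # yields {"x": ["1"]} as the GET parameters.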
url = urlparse.urlparse(self.path)
path_parts = url.path.split("/")
# abort if the url didn't look right
if len(path_parts) != 3:
str_ = "Malformed URL: %s" % url
self.server.app.warning(str_)
return response(404, str_)
# resolve the first part of the url into an app (via the
# router), and abort if it wasn't valid
app_name = path_parts[1]
app = self._find_app(app_name)
if (app is None):
str_ = "Invalid app: %s" % app_name
self.server.app.warning(str_)
return response(404, str_)
# same for the request name within the app
meth_name = "ajax_%s_%s" % (self.command, path_parts[2])
if not hasattr(app, meth_name):
str_ = "Invalid method: %s.%s" %\
(app.__class__.__name__, meth_name)
self.server.app.warning(str_)
return response(404, str_)
# everything appears to be well, so call the target method,
# and return the response (as a string, for now)
try:
method = getattr(app, meth_name)
args = [cgi.parse_qs(url.query)]
# for post requests, we'll also need to parse the form
# data and hand it along to the method
if self.command == "POST":
content_type = self.headers["content-type"]
form = {}
# parse the form data via the CGI lib. this is a
# horrible mess, but supports all kinds of encodings
# that we may encounter. (multipart, in particular.)
storage = cgi.FieldStorage(
fp = self.rfile,
headers = self.headers,
environ = {
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": content_type })
# extract the charset from the content-type header,
# which should have been passed along in views.py
charset = self._charset(content_type)
# convert the fieldstorage object into a dict, to
# keep it simple for the handler methods. TODO: make
# this a util, if it's useful elsewhere.
for key in storage.keys():
# convert each of the values with this key into
# unicode, respecting the content-type that the
# request _claims_ to be currently encoded with
val = [
unicode(v, charset)
for v in storage.getlist(key)]
# where possible, store the values as singular,
# to avoid CGI's usual post["id"][0] verbosity
form[key] = val[0] if(len(val) == 1) else val
args.append(form)
self.server.app.debug(
"Calling %s.%s with args: %s" %
(app.__class__.__name__, meth_name, args))
output = method(*args)
self.server.app.debug("Response: %s" % output)
return response(200, output)
# something raised during the request, so return a useless
# http error to the requester
except Exception, err:
self.server.app.warning(traceback.format_exc())
return response(500, unicode(err), False)
# this does nothing, except prevent the incoming http requests
# from being echoed to the screen (which screws up the log)
def log_request(*args):
pass
def start(self):
# create the webserver, through which the AJAX requests from the
# WebUI will arrive (via utils.py)
self.server = self.Server((
settings.AJAX_PROXY_HOST,
settings.AJAX_PROXY_PORT),
self.RequestHandler)
# allow the server to call back the app
self.server.app = self
# start the server in a separate thread, and daemonize it to
# prevent it from hanging once the main thread terminates
self.thread = Thread(target=self.server.serve_forever)
self.thread.setDaemon(True)
self.thread.start()
|
train_pg.py
|
#Reference:
#1. https://github.com/mabirck/CS294-DeepRL/blob/master/lectures/class-5/REINFORCE.py
#2. https://github.com/JamesChuanggg/pytorch-REINFORCE/blob/master/reinforce_continuous.py
#3. https://github.com/pytorch/examples/blob/master/reinforcement_learning/actor_critic.py
# With the help from the implementations above, I was finally able to translate the provided skeleton code in Tensorflow into the code below
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import StepLR
from torch.distributions import Categorical
from torch.distributions.multivariate_normal import MultivariateNormal
import torch.nn.init as ini
import torch.optim as optim        # needed for optim.Adam in train_PG below
from torch import Tensor           # the run() methods convert observations via Tensor(x)
import random
#============================================================================================#
# Utilities
#============================================================================================#
class Policy_discrete(nn.Module):
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
super(Policy_discrete, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_log_probs=[]
self.layers=nn.ModuleList()
for i in range(n_layers):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
self.layers.append(activation)
elif(i==(n_layers-1)):
self.layers.append(nn.Linear(hiddendim, outputdim))
if(output_activation!=None):
self.layers.append(output_activation)
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
self.layers.append(activation)
def forward(self, x):
for i, l in enumerate(self.layers):
x=l(x)
return x
def run(self, x):
x=Variable(Tensor(x))
p=self(x)
if self.original_output:
d=Categorical(logits=p)
else:
#Suppose after the output_activation, we get the probability(i.e. a softmax activation)
#This assumption might be false.
d=Categorical(probs=p)
action=d.sample()
self.history_of_log_probs.append(d.log_prob(action))
return action #haven't checked the type of action, might be buggy here
def learn(self, optimizer, history_of_rewards, gamma, reward_to_go):
total_weighted_reward=Variable(torch.zeros(1,1))
gradient=Variable(torch.zeros(1,1))
loss=0
        if not reward_to_go:
            # sum up all the rewards along the trajectory
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
gradient+=self.history_of_log_probs[i]
loss=loss-gradient*total_weighted_reward
loss=loss/len(history_of_rewards) #in case the episode terminates early
else:
#reward to go mode
for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward=gamma*total_weighted_reward+history_of_rewards[i]
loss=loss-self.history_of_log_probs[i]*total_weighted_reward
loss=loss/len(history_of_rewards) #in case the episode terminates early
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.history_of_log_probs=[]
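    # Usage sketch (illustrative; the environment, dimensions and hyper-parameters
    # are assumptions, not part of the assignment): collect one CartPole-style
    # episode with run(), then update the policy with learn().
    #
    #   env = gym.make("CartPole-v0")
    #   policy = Policy_discrete(4, 2, n_layers=2, hiddendim=32,
    #                            activation=nn.Tanh(), output_activation=None)
    #   optimizer = torch.optim.Adam(policy.parameters(), lr=5e-3)
    #   ob, rewards = env.reset(), []
    #   while True:
    #       ac = policy.run(ob)                    # samples an action, stores its log-prob
    #       ob, rew, done, _ = env.step(ac.item())
    #       rewards.append(rew)
    #       if done:
    #           break
    #   policy.learn(optimizer, rewards, gamma=0.99, reward_to_go=True)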
class Policy_continuous_hw(nn.Module): #this policy network only outputs the mean of the Gaussian
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
        super(Policy_continuous_hw, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_log_probs=[]
self.logstd_raw=nn.Parameter(torch.ones(outputdim), requires_grad=True)
        self.outputid=Variable(torch.eye(outputdim), requires_grad=False)
self.layers=nn.ModuleList()
for i in range(n_layers):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
self.layers.append(activation)
elif(i==(n_layers-1)):
self.layers.append(nn.Linear(hiddendim, outputdim))
if(output_activation!=None):
self.layers.append(output_activation)
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
self.layers.append(activation)
def forward(self, x):
for i, l in enumerate(self.layers):
x=l(x)
return x
def run(self, x):
x=Variable(Tensor(x))
#the action space is continuous
u=self(x)
        sigma2=torch.exp(2*self.logstd_raw)*self.outputid # covariance = std^2 = exp(2*logstd)
d=MultivariateNormal(u, sigma2)
action=d.sample()
self.history_of_log_probs.append(d.log_prob(action))
return action
def learn(self, optimizer, history_of_rewards, gamma, reward_to_go):
total_weighted_reward=Variable(torch.zeros(1,1))
gradient=Variable(torch.zeros(1,1))
loss=0
        if not reward_to_go:
            # sum up all the rewards along the trajectory
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
gradient+=self.history_of_log_probs[i]
loss=loss-(gradient*total_weighted_reward.expand(gradient.size())).sum()
loss=loss/len(history_of_rewards) #in case the episode terminates early
else:
#reward to go mode
for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward=gamma*total_weighted_reward+history_of_rewards[i]
loss=loss-(self.history_of_log_probs[i]*total_weighted_reward.expand(self.history_of_log_probs[i].size())).sum()
loss=loss/len(history_of_rewards) #in case the episode terminates early
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.history_of_log_probs=[]
class Critic(nn.Module): #Critic is always discrete
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
super(Critic, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_values=[]
self.layers=nn.ModuleList()
for i in range(n_layers):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
self.layers.append(activation)
elif(i==(n_layers-1)):
self.layers.append(nn.Linear(hiddendim, outputdim))
if(output_activation!=None):
self.layers.append(output_activation)
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
self.layers.append(activation)
def forward(self, x):
for i, l in enumerate(self.layers):
x=l(x)
return x
def run(self, x):
x=Variable(Tensor(x))
v=self(x)
self.history_of_values.append(v)
return v #haven't checked the type of value, might be buggy here
def learn(self, optimizer, history_of_rewards, gamma):
total_weighted_reward=0
gradient=Variable(torch.zeros(1,1))
loss=0
history_of_total_weighted_reward=[]
for i in reversed(range(len(history_of_rewards))):
total_weighted_reward=gamma*total_weighted_reward+history_of_rewards[i]
history_of_total_weighted_reward.insert(0,total_weighted_reward)
history_of_total_weighted_reward=torch.tensor(history_of_total_weighted_reward)
#rescale the reward value(do not want to compute raw Q value)
reward_u=history_of_total_weighted_reward.mean()
reward_std=history_of_total_weighted_reward.std()+1e-8
history_of_total_weighted_reward=(history_of_total_weighted_reward-reward_u)/reward_std
for i in range(len(self.history_of_values)):
            loss+=F.mse_loss(self.history_of_values[i], history_of_total_weighted_reward[i])
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.history_of_values=[]
class Policy_continuous(nn.Module):
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
super(Policy_continuous, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_log_probs=[]
self.layers=nn.ModuleList()
for i in range(n_layers-1):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
self.layers.append(activation)
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
self.layers.append(activation)
self.mean=nn.Linear(hiddendim, outputdim)
self.logstd_raw=nn.Linear(hiddendim, outputdim)
        self.outputid=Variable(torch.eye(outputdim), requires_grad=False)
        # keep the output activation separate so forward() can apply it to both heads
        self.output_activation=output_activation
    def forward(self, x):
        # shared trunk; the mean and log-std heads below branch off its output
        for l in self.layers:
            x=l(x)
u=self.mean(x)
logstd=self.logstd_raw(x)
        if not self.original_output:
            u=self.output_activation(u)
            logstd=self.output_activation(logstd)
return u, logstd
def run(self, x):
x=Variable(Tensor(x))
u, logstd=self(x)
        sigma2=torch.exp(2*logstd)*self.outputid
d=MultivariateNormal(u, sigma2) #might want to use N Gaussian instead
action=d.sample()
self.history_of_log_probs.append(d.log_prob(action))
return action
def learn(self, optimizer, history_of_rewards, gamma, reward_to_go):
total_weighted_reward=Variable(torch.zeros(1,1))
gradient=Variable(torch.zeros(1,1))
loss=0
        if not reward_to_go:
            # sum up all the rewards along the trajectory
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
gradient+=self.history_of_log_probs[i]
loss=loss-(gradient*total_weighted_reward.expand(gradient.size())).sum()
loss=loss/len(history_of_rewards) #in case the episode terminates early
else:
#reward to go mode
for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward=gamma*total_weighted_reward+history_of_rewards[i]
loss=loss-(self.history_of_log_probs[i]*total_weighted_reward.expand(self.history_of_log_probs[i].size())).sum()
loss=loss/len(history_of_rewards) #in case the episode terminates early
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.history_of_log_probs=[]
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
        activation=torch.nn.Tanh(),   # pass an instance so it can be appended to nn.ModuleList
output_activation=None,
discrete=True
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
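    # Usage sketch (hypothetical dimensions), mirroring the call sites further down:
    #   actor  = build_mlp(ob_dim, ac_dim, "actor", n_layers=2, size=64, discrete=True)
    #   critic = build_mlp(ob_dim, 1, "nn_baseline", n_layers=2, size=64)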
    if scope=="nn_baseline":
print("critic activated.")
return Critic(input_placeholder, output_size, n_layers, size, activation, output_activation) #Critic is always discrete
else:
#return an actor
if discrete:
print("discrete-type actor activated.")
return Policy_discrete(input_placeholder, output_size, n_layers, size, activation, output_activation)
else:
print("continuous-type actor activated.")
return Policy_continuous_hw(input_placeholder, output_size, n_layers, size, activation, output_activation)
def pathlength(path):
return len(path["reward"])
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
torch.manual_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#create actor
actor=build_mlp(ob_dim, ac_dim, "actor", n_layers=n_layers, size=size, discrete=discrete)
    actor_optimizer = optim.Adam(actor.parameters(), lr=learning_rate)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
critic = build_mlp(ob_dim,
1,
"nn_baseline",
n_layers=n_layers,
size=size)
        critic_optimizer=optim.Adam(critic.parameters(), lr=learning_rate)
    #todo: initialize actor and critic
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = actor.run(ob)
print("need to type-check action here:(two lines)")
print(ac)
print(ac.size())
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
#One episode finishes; perform update here
finish_episode(actor, actor_optimizer, critic=None, critic_optimizer=None, )
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|