source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
pushbullet.py | """
Pushbullet platform for sensor component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.pushbullet/
"""
import logging
import voluptuous as vol
from homeassistant.const import (CONF_API_KEY, CONF_MONITORED_CONDITIONS)
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pushbullet.py==0.11.0']
_LOGGER = logging.getLogger(__name__)
# Push fields that may be monitored, mapped to human-readable sensor names.
# The key is also the key looked up in the incoming push payload.
SENSOR_TYPES = {
    'application_name': ['Application name'],
    'body': ['Body'],
    'notification_id': ['Notification ID'],
    'notification_tag': ['Notification tag'],
    'package_name': ['Package name'],
    'receiver_email': ['Receiver email'],
    'sender_email': ['Sender email'],
    'source_device_iden': ['Sender device ID'],
    'title': ['Title'],
    'type': ['Type'],
}
# Platform configuration: an API key is mandatory; by default only the
# push title and body are monitored. Monitored conditions must be a
# non-empty list of keys from SENSOR_TYPES.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_MONITORED_CONDITIONS, default=['title', 'body']):
        vol.All(cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
  """Set up the Pushbullet Sensor platform."""
  from pushbullet import InvalidKeyError
  from pushbullet import PushBullet

  try:
    pushbullet = PushBullet(config.get(CONF_API_KEY))
  except InvalidKeyError:
    _LOGGER.error("Wrong API key for Pushbullet supplied")
    return False

  # A single provider runs the background listener; every configured
  # sensor reads from it.
  provider = PushBulletNotificationProvider(pushbullet)
  sensors = [PushBulletNotificationSensor(provider, condition)
             for condition in config[CONF_MONITORED_CONDITIONS]]
  add_devices(sensors)
class PushBulletNotificationSensor(Entity):
  """Representation of a Pushbullet Sensor."""

  def __init__(self, pb, element):
    """Initialize the Pushbullet sensor."""
    self.pushbullet = pb
    self._element = element
    self._state = None
    self._state_attributes = None

  def update(self):
    """Fetch the latest data from the sensor.

    This will fetch the 'sensor reading' into self._state but also all
    attributes into self._state_attributes.
    """
    data = self.pushbullet.data
    try:
      new_state = data[self._element]
    except (KeyError, TypeError):
      # No push seen yet (data is None) or the monitored field is absent;
      # keep the previous state.
      return
    self._state = new_state
    self._state_attributes = data

  @property
  def name(self):
    """Return the name of the sensor."""
    return '{} {}'.format('Pushbullet', self._element)

  @property
  def state(self):
    """Return the current state of the sensor."""
    return self._state

  @property
  def device_state_attributes(self):
    """Return all known attributes of the sensor."""
    return self._state_attributes
class PushBulletNotificationProvider():
  """Provider for an account, leading to one or more sensors."""

  def __init__(self, pb):
    """Start to retrieve pushes from the given Pushbullet instance."""
    import threading
    self.pushbullet = pb
    self._data = None
    self.listener = None
    # Daemon thread so the listener never blocks Home Assistant shutdown.
    self.thread = threading.Thread(target=self.retrieve_pushes)
    self.thread.daemon = True
    self.thread.start()

  def on_push(self, data):
    """Update the current data.

    Currently only monitors pushes but might be extended to monitor
    different kinds of Pushbullet events.
    """
    if data['type'] != 'push':
      return
    self._data = data['push']

  @property
  def data(self):
    """Return the current data stored in the provider."""
    return self._data

  def retrieve_pushes(self):
    """Retrieve_pushes.

    Spawn a new Listener and links it to self.on_push.
    """
    from pushbullet import Listener
    self.listener = Listener(account=self.pushbullet, on_push=self.on_push)
    _LOGGER.debug("Getting pushes")
    try:
      self.listener.run_forever()
    finally:
      # Always release the websocket, even if run_forever raises.
      self.listener.close()
|
data_store_test.py | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""These are basic tests for the data store abstraction.
Implementations should be able to pass these tests to be conformant.
"""
import csv
import functools
import hashlib
import inspect
import logging
import operator
import os
import random
import string
import tempfile
import thread
import threading
import time
# pylint: disable=unused-import, g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import, g-bad-import-order
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
from grr.lib import threadpool
from grr.lib import utils
from grr.lib import worker
def DeletionTest(f):
  """This indicates a test that uses deletion."""

  @functools.wraps(f)
  def Decorator(testinstance):
    # Guard clause: skip when the data store under test cannot delete.
    if not testinstance.TEST_DELETION:
      return testinstance.skipTest("Tests that use deletion are disabled "
                                   "for this data store.")
    return f(testinstance)

  return Decorator
def TransactionTest(f):
  """This indicates a test that uses transactions."""

  @functools.wraps(f)
  def Decorator(testinstance):
    # Guard clause: skip when the data store under test lacks transactions.
    if not testinstance.TEST_TRANSACTIONS:
      return testinstance.skipTest("Tests that use transactions are disabled "
                                   "for this data store.")
    return f(testinstance)

  return Decorator
class _DataStoreTest(test_lib.GRRBaseTest):
"""Test the data store abstraction."""
test_row = "aff4:/row:foo"
# This flag controls if tests can also delete data. Some data stores don't
# support deletion so those tests will fail for them.
TEST_DELETION = True
# The same applies to transactions.
TEST_TRANSACTIONS = True
def setUp(self):
  """Start every test from a clean, flushed data store."""
  super(_DataStoreTest, self).setUp()
  self.InitDatastore()
  data_store.DB.DeleteSubject(self.test_row, token=self.token)
  # Remove the rows created by _MakeTimestampedRows and similar helpers.
  for i in range(20):
    data_store.DB.DeleteSubject("aff4:/row:%s" % i, token=self.token)
  data_store.DB.Flush()
  self.acls_installed = False
def tearDown(self):
  """Tear down the base test case, then the custom data store."""
  super(_DataStoreTest, self).tearDown()
  self.DestroyDatastore()
def InitDatastore(self):
  """Initiates custom data store."""
  # Intentionally a no-op; implementation subclasses override this hook.
def DestroyDatastore(self):
  """Destroys custom data store."""
  # Intentionally a no-op; implementation subclasses override this hook.
def testSetResolve(self):
  """Test the Set() and Resolve() methods."""
  predicate = "task:00000001"
  value = rdfvalue.GrrMessage(session_id="session")
  # Ensure that setting a value is immediately available.
  data_store.DB.Set(self.test_row, predicate, value, token=self.token)
  time.sleep(1)
  # Write a second, unrelated row before reading the first one back.
  data_store.DB.Set(self.test_row + "X", predicate, value, token=self.token)
  (stored_proto, _) = data_store.DB.Resolve(
      self.test_row, predicate, token=self.token)
  # Re-wrap the stored value so the comparison works regardless of the
  # serialized form the data store returned.
  stored_proto = rdfvalue.GrrMessage(stored_proto)
  self.assertEqual(stored_proto.session_id, value.session_id)
def testMultiSet(self):
  """Test the MultiSet() methods."""
  unicode_string = u"this is a uñîcödé string"
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [1],
                          "aff4:stored": [unicode_string],
                          "aff4:unknown_attribute": ["hello"]},
                         token=self.token)
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                      token=self.token)
  self.assertEqual(stored, 1)
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:stored",
                                      token=self.token)
  self.assertEqual(stored, unicode_string)
  # Make sure that unknown attributes are stored as bytes.
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:unknown_attribute",
                                      token=self.token)
  self.assertEqual(stored, "hello")
  self.assertEqual(type(stored), str)
  # Test setting values with timestamp. Each value is given as a
  # (value, timestamp) tuple.
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [(1, 100)],
                          "aff4:stored": [(unicode_string, 200)]},
                         token=self.token)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                       token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 100)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:stored",
                                       token=self.token)
  self.assertEqual(stored, unicode_string)
  self.assertEqual(ts, 200)
  # Test giving a broken timestamp definition. A None timestamp should
  # fall back to the current (here faked) time: 555 s = 555000000 us.
  with test_lib.FakeTime(555):
    data_store.DB.MultiSet(self.test_row,
                           {"aff4:size": [(1, None)],
                            "aff4:stored": [(unicode_string, 200)]},
                           token=self.token)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                       token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 555000000)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:stored",
                                       token=self.token)
  self.assertEqual(stored, unicode_string)
  self.assertEqual(ts, 200)
def testMultiSetAsync(self):
  """Test the async MultiSet() methods."""
  unicode_string = u"this is a uñîcödé string"
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [3],
                          "aff4:stored": [unicode_string],
                          "aff4:unknown_attribute": ["hello"]},
                         sync=False, token=self.token)
  # Force the flusher thread to flush by invoking its target directly.
  data_store.DB.flusher_thread.target()
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                      token=self.token)
  self.assertEqual(stored, 3)
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:stored",
                                      token=self.token)
  self.assertEqual(stored, unicode_string)
  # Make sure that unknown attributes are stored as bytes.
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:unknown_attribute",
                                      token=self.token)
  self.assertEqual(stored, "hello")
  self.assertEqual(type(stored), str)
def testMultiSet2(self):
  """Test MultiSet() with a per-element timestamp."""
  # Specify a per element timestamp
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [(1, 100)],
                          "aff4:stored": [("2", 200)]},
                         token=self.token)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                       token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 100)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:stored",
                                       token=self.token)
  self.assertEqual(stored, "2")
  self.assertEqual(ts, 200)
def testMultiSet3(self):
  """Test the MultiSet() delete methods."""
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [1],
                          "aff4:stored": ["2"]},
                         token=self.token)
  # Delete "aff4:size" while re-writing "aff4:stored" in the same call.
  data_store.DB.MultiSet(self.test_row, {"aff4:stored": ["2"]},
                         to_delete=["aff4:size"],
                         token=self.token)
  # This should be gone now
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                      token=self.token)
  self.assertEqual(stored, None)
  (stored, _) = data_store.DB.Resolve(self.test_row, "aff4:stored",
                                      token=self.token)
  self.assertEqual(stored, "2")
def testMultiSet4(self):
  """Test the MultiSet() delete methods when deleting the same predicate."""
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [1],
                          "aff4:stored": ["2"]},
                         token=self.token)
  # Delete "aff4:size" and write a new value for it in the same call;
  # only the new value may survive.
  data_store.DB.MultiSet(self.test_row, {"aff4:size": [4]},
                         to_delete=["aff4:size"],
                         token=self.token)
  # This should only produce a single result. The original loop-counter
  # check (enumerate + assertEqual(count, 0)) passed vacuously when zero
  # results were returned; assert the exact result count instead.
  results = list(data_store.DB.ResolveRegex(
      self.test_row, "aff4:size", timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token))
  self.assertEqual(len(results), 1)
  predicate, value, _ = results[0]
  self.assertEqual(value, 4)
  self.assertEqual(predicate, "aff4:size")
def testMultiSetSetsTimestapWhenReplacing(self):
  """MultiSet() with replace=True must honor the explicit timestamp."""
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:size": [(1, 100)]},
                         replace=True,
                         token=self.token)
  (stored, ts) = data_store.DB.Resolve(self.test_row, "aff4:size",
                                       token=self.token)
  self.assertEqual(stored, 1)
  self.assertEqual(ts, 100)
def testMultiSetRemovesOtherValuesWhenReplacing(self):
  """MultiSet() with replace=True must drop all prior versions."""
  # First write two versions without replacing; both must be kept,
  # newest first.
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:stored": [("2", 100), ("3", 200)]},
                         replace=False, token=self.token)
  values = data_store.DB.ResolveRegex(self.test_row, "aff4:stored",
                                      timestamp=data_store.DB.ALL_TIMESTAMPS,
                                      token=self.token)
  self.assertListEqual(
      values, [("aff4:stored", "3", 200), ("aff4:stored", "2", 100)])
  # Replacing must leave only the newly written version.
  data_store.DB.MultiSet(self.test_row,
                         {"aff4:stored": [("4", 150)]},
                         replace=True, token=self.token)
  values = data_store.DB.ResolveRegex(self.test_row, "aff4:stored",
                                      timestamp=data_store.DB.ALL_TIMESTAMPS,
                                      token=self.token)
  self.assertListEqual(
      values, [("aff4:stored", "4", 150)])
@DeletionTest
def testDeleteAttributes(self):
  """Test we can delete an attribute."""
  predicate = "metadata:predicate"
  data_store.DB.Set(self.test_row, predicate, "hello", token=self.token)
  # Check it is there.
  (stored, _) = data_store.DB.Resolve(self.test_row, predicate,
                                      token=self.token)
  self.assertEqual(stored, "hello")
  data_store.DB.DeleteAttributes(self.test_row, [predicate], sync=True,
                                 token=self.token)
  # After deletion Resolve must return no value.
  (stored, _) = data_store.DB.Resolve(self.test_row, predicate,
                                      token=self.token)
  self.assertEqual(stored, None)
def CheckLength(self, predicate, l):
  """Assert that *predicate* has exactly *l* versions in timestamp range (0, 500)."""
  all_attributes = data_store.DB.ResolveMulti(
      self.test_row, [predicate], timestamp=(0, 500),
      token=self.token)
  self.assertEqual(len(list(all_attributes)), l)
def CheckLast(self, predicate, expected_value, exptected_ts):
  """Assert that the newest version of *predicate* matches value and timestamp."""
  stored, ts = data_store.DB.Resolve(self.test_row, predicate,
                                     token=self.token)
  self.assertEqual(stored, expected_value)
  self.assertEqual(ts, exptected_ts)
@DeletionTest
def testDeleteAttributesTimestamps(self):
  """Test we can delete an attribute in a time range."""
  predicate = "metadata:tspredicate"
  # Write four versions at timestamps 100..400.
  data_store.DB.Set(self.test_row, predicate, "hello100", timestamp=100,
                    replace=False, token=self.token)
  data_store.DB.Set(self.test_row, predicate, "hello200", timestamp=200,
                    replace=False, token=self.token)
  data_store.DB.Set(self.test_row, predicate, "hello300", timestamp=300,
                    replace=False, token=self.token)
  data_store.DB.Set(self.test_row, predicate, "hello400", timestamp=400,
                    replace=False, token=self.token)
  # Check they are there.
  self.CheckLast(predicate, "hello400", 400)
  self.CheckLength(predicate, 4)
  # Delete timestamps between 0 and 150.
  data_store.DB.DeleteAttributes(self.test_row, [predicate], start=0, end=150,
                                 sync=True, token=self.token)
  self.CheckLast(predicate, "hello400", 400)
  self.CheckLength(predicate, 3)
  # Delete timestamps between 350 and 450.
  data_store.DB.DeleteAttributes(self.test_row, [predicate],
                                 start=350, end=450, sync=True,
                                 token=self.token)
  self.CheckLast(predicate, "hello300", 300)
  self.CheckLength(predicate, 2)
  # Delete everything.
  data_store.DB.DeleteAttributes(self.test_row, [predicate],
                                 start=0, end=500, sync=True,
                                 token=self.token)
  self.CheckLast(predicate, None, 0)
  self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubjects(self):
  """DeleteSubject() removes every attribute of the subject."""
  predicate = "metadata:tspredicate"
  data_store.DB.Set(self.test_row, predicate, "hello100", timestamp=100,
                    replace=False, token=self.token)
  data_store.DB.DeleteSubject(self.test_row, token=self.token)
  data_store.DB.Flush()
  self.CheckLength(predicate, 0)
  # This should work with the sync argument too.
  data_store.DB.Set(self.test_row, predicate, "hello100", timestamp=100,
                    replace=False, token=self.token)
  data_store.DB.DeleteSubject(self.test_row, token=self.token, sync=True)
  self.CheckLength(predicate, 0)
def testMultiResolveRegex(self):
  """tests MultiResolveRegex."""
  rows = self._MakeTimestampedRows()
  subjects = dict(data_store.DB.MultiResolveRegex(
      rows, ["metadata:[34]", "metadata:[78]"], token=self.token))
  # Sort the keys for a deterministic comparison (Python 2 .keys() list).
  subject_names = subjects.keys()
  subject_names.sort()
  self.assertEqual(len(subjects), 4)
  self.assertEqual(
      subject_names,
      [u"aff4:/row:3", u"aff4:/row:4", u"aff4:/row:7", u"aff4:/row:8"])
def testMultiResolveRegexTimestamp(self):
  """tests MultiResolveRegex with a timestamp."""
  # Make some rows. Each predicate gets two versions: one at timestamp
  # i + 10 and one at i + 20.
  rows = []
  for i in range(10):
    row_name = "aff4:/row:%s" % i
    data_store.DB.Set(
        row_name, "metadata:%s" % i, "v%d" % i, timestamp=i + 10,
        replace=False, token=self.token)
    data_store.DB.Set(
        row_name, "metadata:%s" % i, "v%d" % i, timestamp=i + 20,
        replace=False, token=self.token)
    rows.append(row_name)
  # Query for newest ts.
  subjects = dict(data_store.DB.MultiResolveRegex(
      rows, ["metadata:[34]", "metadata:[78]"],
      timestamp=data_store.DB.NEWEST_TIMESTAMP,
      token=self.token))
  subject_names = subjects.keys()
  subject_names.sort()
  self.assertEqual(len(subjects), 4)
  self.assertEqual(
      subject_names,
      [u"aff4:/row:3", u"aff4:/row:4", u"aff4:/row:7", u"aff4:/row:8"])
  # Only the newest version of each predicate is returned.
  self.assertEqual(len(subjects[u"aff4:/row:3"]), 1)
  self.assertEqual(len(subjects[u"aff4:/row:4"]), 1)
  self.assertEqual(len(subjects[u"aff4:/row:7"]), 1)
  self.assertEqual(len(subjects[u"aff4:/row:8"]), 1)
  # Query for all ts.
  subjects = dict(data_store.DB.MultiResolveRegex(
      rows, ["metadata:[34]", "metadata:[78]"],
      timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token))
  subject_names = subjects.keys()
  subject_names.sort()
  self.assertEqual(len(subjects), 4)
  self.assertEqual(
      subject_names,
      [u"aff4:/row:3", u"aff4:/row:4", u"aff4:/row:7", u"aff4:/row:8"])
  # Both versions of each predicate are returned.
  self.assertEqual(len(subjects[u"aff4:/row:3"]), 2)
  self.assertEqual(len(subjects[u"aff4:/row:4"]), 2)
  self.assertEqual(len(subjects[u"aff4:/row:7"]), 2)
  self.assertEqual(len(subjects[u"aff4:/row:8"]), 2)
  # Query such that not all subjects yield results.
  subjects = dict(data_store.DB.MultiResolveRegex(
      rows, ["metadata:[34]", "metadata:[78]"], timestamp=(12, 17),
      token=self.token))
  subject_names = subjects.keys()
  subject_names.sort()
  self.assertEqual(len(subjects), 3)
  self.assertEqual(
      subject_names,
      [u"aff4:/row:3", u"aff4:/row:4", u"aff4:/row:7"])
  self.assertEqual(len(subjects[u"aff4:/row:3"]), 1)
  self.assertEqual(len(subjects[u"aff4:/row:4"]), 1)
  self.assertEqual(len(subjects[u"aff4:/row:7"]), 1)
  # Query such that some subjects yield more results.
  subjects = dict(data_store.DB.MultiResolveRegex(
      rows, ["metadata:[34]", "metadata:[78]"], timestamp=(14, 27),
      token=self.token))
  subject_names = subjects.keys()
  subject_names.sort()
  self.assertEqual(len(subjects), 4)
  self.assertEqual(
      subject_names,
      [u"aff4:/row:3", u"aff4:/row:4", u"aff4:/row:7", u"aff4:/row:8"])
  self.assertEqual(len(subjects[u"aff4:/row:3"]), 1)
  self.assertEqual(len(subjects[u"aff4:/row:4"]), 2)
  self.assertEqual(len(subjects[u"aff4:/row:7"]), 2)
  self.assertEqual(len(subjects[u"aff4:/row:8"]), 1)
def _MakeTimestampedRows(self):
  """Create rows aff4:/row:0..9 at timestamps 100..109; return their names.

  The first five rows are written via Set(), the rest via MultiSet(), so
  both write paths are exercised by callers.
  """
  # Make some rows.
  rows = []
  for i in range(5):
    row_name = "aff4:/row:%s" % i
    timestamp = rdfvalue.RDFDatetime(100 + i)
    data_store.DB.Set(row_name, "metadata:%s" % i, i, timestamp=timestamp,
                      token=self.token)
    rows.append(row_name)
  for i in range(5, 10):
    row_name = "aff4:/row:%s" % i
    timestamp = rdfvalue.RDFDatetime(100 + i)
    data_store.DB.MultiSet(row_name, {"metadata:%s" % i: [i]},
                           timestamp=timestamp, token=self.token)
    rows.append(row_name)
  return rows
def _CheckResultTimestamps(self, result, expected_timestamps):
  """Assert that *result* contains exactly *expected_timestamps* (any order)."""
  timestamps = []
  for predicates in result.itervalues():
    for predicate in predicates:
      # Each result entry is a (predicate, value, timestamp) tuple.
      timestamps.append(predicate[2])
  self.assertListEqual(sorted(timestamps), sorted(expected_timestamps))
def testResolveRegexResultsOrderedInDecreasingTimestampOrder1(self):
  """Results are timestamp-descending when values were set ascending."""
  predicate1 = "metadata:predicate1"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order1"
  # Set 1000 values with increasing timestamps.
  for i in range(1000):
    data_store.DB.Set(
        subject, predicate1, str(i), timestamp=i * 1000, replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order.
  # This test along with a next one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = data_store.DB.ResolveRegex(
      subject, predicate1, timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  for result_index, i in enumerate(reversed(range(1000))):
    self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolveRegexResultsOrderedInDecreasingTimestampOrder2(self):
  """Results are timestamp-descending when values were set descending."""
  predicate1 = "metadata:predicate1"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order2"
  # Set 1000 values with timestamps starting in the future and going to
  # the past.
  for i in reversed(range(1000)):
    data_store.DB.Set(
        subject, predicate1, str(i), timestamp=i * 1000, replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order.
  # This test along with a previous one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = data_store.DB.ResolveRegex(
      subject, predicate1, timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  for result_index, i in enumerate(reversed(range(1000))):
    self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolveRegexResultsOrderedInDecreasingTimestampOrderPerColumn1(self):
  """Results are timestamp-descending per column (values set ascending)."""
  predicate1 = "metadata:predicate1"
  predicate2 = "metadata:predicate2"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column1"
  # Set 1000 values with increasing timestamps for each predicate.
  for i in range(1000):
    data_store.DB.Set(
        subject, predicate1, str(i), timestamp=i * 1000, replace=False,
        token=self.token)
    data_store.DB.Set(
        subject, predicate2, str(i), timestamp=i * 1000, replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order
  # per column.
  # This test along with a previous one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = list(data_store.DB.ResolveRegex(
      subject, "metadata:predicate.*", timestamp=data_store.DB.ALL_TIMESTAMPS,
      limit=10000, token=self.token))
  predicate1_results = [r for r in result if r[0] == predicate1]
  for result_index, i in enumerate(reversed(range(1000))):
    self.assertEqual(predicate1_results[result_index],
                     (predicate1, str(i), i * 1000))
  predicate2_results = [r for r in result if r[0] == predicate2]
  for result_index, i in enumerate(reversed(range(1000))):
    self.assertEqual(predicate2_results[result_index],
                     (predicate2, str(i), i * 1000))
def testResolveRegexResultsOrderedInDecreasingTimestampOrderPerColumn2(self):
  """Results are timestamp-descending per column (values set descending)."""
  predicate1 = "metadata:predicate1"
  predicate2 = "metadata:predicate2"
  subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column2"
  # Set 1000 values for each predicate with timestamps starting in the
  # future and going to the past.
  for i in reversed(range(1000)):
    data_store.DB.Set(
        subject, predicate1, str(i), timestamp=i * 1000, replace=False,
        token=self.token)
    data_store.DB.Set(
        subject, predicate2, str(i), timestamp=i * 1000, replace=False,
        token=self.token)
  # Check that results will be returned in decreasing timestamp order
  # per column.
  # This test along with a previous one tests that no matter how
  # values were set, they will be sorted by timestamp in the decreasing
  # order when fetched.
  result = list(data_store.DB.ResolveRegex(
      subject, "metadata:predicate.*", timestamp=data_store.DB.ALL_TIMESTAMPS,
      limit=10000, token=self.token))
  predicate1_results = [r for r in result if r[0] == predicate1]
  for result_index, i in enumerate(reversed(range(1000))):
    self.assertEqual(predicate1_results[result_index],
                     (predicate1, str(i), i * 1000))
  predicate2_results = [r for r in result if r[0] == predicate2]
  for result_index, i in enumerate(reversed(range(1000))):
    self.assertEqual(predicate2_results[result_index],
                     (predicate2, str(i), i * 1000))
def testRDFDatetimeTimestamps(self):
  """RDFDatetime objects are accepted wherever timestamps are expected."""
  test_rows = self._MakeTimestampedRows()
  # Make sure all timestamps are set correctly.
  result = dict(data_store.DB.MultiResolveRegex(
      test_rows, ["metadata:.*"], token=self.token))
  self._CheckResultTimestamps(result, range(100, 110))
  # Now MultiResolve by timestamp.
  timestamp = (rdfvalue.RDFDatetime(103), rdfvalue.RDFDatetime(108))
  result = dict(data_store.DB.MultiResolveRegex(
      test_rows, ["metadata:.*"], token=self.token, timestamp=timestamp))
  # Timestamp selection is inclusive so we should have 103-108.
  self._CheckResultTimestamps(result, range(103, 109))
  # Now test timestamped attributes.
  row_name = "aff4:/attribute_test_row"
  attribute_name = "metadata:test_attribute"
  attributes_to_set = {attribute_name: [
      (i, rdfvalue.RDFDatetime(i)) for i in xrange(100, 110)]}
  data_store.DB.MultiSet(row_name, attributes_to_set, replace=False,
                         token=self.token)
  # Make sure all timestamps are set correctly.
  result = dict(data_store.DB.MultiResolveRegex(
      [row_name], ["metadata:.*"], timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token))
  self._CheckResultTimestamps(result, range(100, 110))
  if self.TEST_DELETION:
    # Delete some of them.
    data_store.DB.DeleteAttributes(row_name, [attribute_name],
                                   start=rdfvalue.RDFDatetime(102),
                                   end=rdfvalue.RDFDatetime(104),
                                   token=self.token)
    # Make sure that passing start==end deletes that version.
    data_store.DB.DeleteAttributes(row_name, [attribute_name],
                                   start=rdfvalue.RDFDatetime(106),
                                   end=rdfvalue.RDFDatetime(106),
                                   token=self.token)
    result = dict(data_store.DB.MultiResolveRegex(
        [row_name], ["metadata:.*"], timestamp=data_store.DB.ALL_TIMESTAMPS,
        token=self.token))
    # Versions 102-104 and 106 were deleted above.
    expected_timestamps = [100, 101, 105, 107, 108, 109]
    self._CheckResultTimestamps(result, expected_timestamps)
@TransactionTest
def testTransactions(self):
  """Test transactions raise."""
  predicate = u"metadata:predicateÎñţér"
  subject = u"aff4:/metadata:rowÎñţér"
  subject2 = u"aff4:/metadata:rowÎñţér2"
  # t1 is holding a transaction on this row.
  t1 = data_store.DB.Transaction(subject, token=self.token)
  t1.Resolve(predicate)
  # This means that modification of this row will fail using a different
  # transaction.
  self.assertRaises(
      data_store.TransactionError, data_store.DB.Transaction,
      subject, token=self.token)
  # We should still be able to modify using the first transaction:
  t1.Set(predicate, "1")
  self.assertEqual(t1.Resolve(predicate)[0], "1")
  t1.Commit()
  self.assertEqual(
      data_store.DB.Resolve(subject, predicate, token=self.token)[0], "1")
  # After Commit() the lock is released, so a new transaction succeeds.
  t2 = data_store.DB.Transaction(subject, token=self.token)
  t2.Set(predicate, "2")
  t2.Commit()
  self.assertEqual(
      data_store.DB.Resolve(subject, predicate, token=self.token)[0], "2")
  # Check that ResolveRegex works correctly.
  predicate1 = u"metadata:attribute10"
  predicate1regex = "metadata:attribute1[0-9]"
  predicate2 = u"metadata:attribute20"
  t3 = data_store.DB.Transaction(subject, token=self.token)
  t3.Set(predicate1, "10")
  t3.Set(predicate2, "20")
  # Uncommitted writes must already be visible within the transaction.
  self.assertEqual(t3.ResolveRegex(predicate1regex)[0][1], "10")
  self.assertEqual(
      t3.ResolveRegex(predicate1regex,
                      timestamp=data_store.DB.NEWEST_TIMESTAMP)[0][1], "10")
  t3.Commit()
  self.assertEqual(t3.Resolve(predicate1)[0], "10")
  self.assertEqual(t3.Resolve(predicate2)[0], "20")
  self.assertEqual(t3.ResolveRegex(predicate1regex)[0][1], "10")
  self.assertEqual(
      t3.ResolveRegex(predicate1regex,
                      timestamp=data_store.DB.NEWEST_TIMESTAMP)[0][1], "10")
  t4 = data_store.DB.Transaction(subject, token=self.token)
  t4.DeleteAttribute(predicate1)
  self.assertEqual(t4.Resolve(predicate1), (None, 0))
  self.assertEqual(len(t4.ResolveRegex(predicate1regex)), 0)
  self.assertEqual(t4.Resolve(predicate2)[0], "20")
  t4.Commit()
  # The deletion must also be visible after the commit.
  self.assertEqual(t4.Resolve(predicate1), (None, 0))
  self.assertEqual(len(t4.ResolveRegex(predicate1regex)), 0)
  # Check that locks don't influence each other.
  # t1 is holding a transaction on this row.
  t1 = data_store.DB.Transaction(subject, token=self.token)
  t1.Resolve(predicate)
  # This means that modification of this row will fail using a different
  # transaction.
  self.assertRaises(
      data_store.TransactionError, data_store.DB.Transaction,
      subject, token=self.token)
  # t2 is holding a transaction on this row.
  t2 = data_store.DB.Transaction(subject2, token=self.token)
  t2.Resolve(predicate)
  # This means that modification of this row will fail using a different
  # transaction.
  self.assertRaises(
      data_store.TransactionError, data_store.DB.Transaction,
      subject2, token=self.token)
  t2.Commit()
  # Subject 1 should still be locked.
  self.assertRaises(
      data_store.TransactionError, data_store.DB.Transaction,
      subject, token=self.token)
  t1.Commit()
@TransactionTest
def testTransactionLease(self):
  """UpdateLease() keeps a transaction usable; writes commit normally."""
  subject = u"aff4:/leasetest"
  predicate = "metadata:pred"
  t = data_store.DB.Transaction(subject, token=self.token)
  t.Resolve(predicate)
  t.Set(predicate, "1")
  t.UpdateLease(100)
  # Resolve before Commit must see the value written in this transaction.
  res = t.Resolve(predicate)
  t.Set(predicate, "2")
  t.Commit()
  self.assertEqual(res[0], "1")
  t = data_store.DB.Transaction(subject, token=self.token)
  self.assertEqual(t.Resolve(predicate)[0], "2")
@TransactionTest
def testAbortTransaction(self):
  """Writes made before Abort() must not be visible afterwards."""
  predicate = u"metadata:predicate_Îñţér"
  row = u"metadata:row1Îñţér"
  data_store.DB.DeleteSubject(row, token=self.token)
  t1 = data_store.DB.Transaction(row, token=self.token)
  # Set a value and abort; the write must be discarded.
  t1.Set(predicate, "1")
  t1.Abort()
  # A new transaction on the same row must see no value from t1.
  t2 = data_store.DB.Transaction(row, token=self.token)
  self.assertEqual(t2.Resolve(predicate)[0], None)
  t2.Set(predicate, "2")
  t2.Commit()
  self.assertEqual(t2.Resolve(predicate)[0], "2")
@TransactionTest
def testTransactions2(self):
  """Test that transactions on different rows do not interfere."""
  predicate = u"metadata:predicate_Îñţér"
  t1 = data_store.DB.Transaction(u"metadata:row1Îñţér", lease_time=100,
                                 token=self.token)
  # Our lease should be between 0 and 100 seconds.
  self.assertLess(t1.CheckLease(), 100)
  self.assertGreater(t1.CheckLease(), 0)
  t2 = data_store.DB.Transaction(u"metadata:row2Îñţér", token=self.token)
  # This grabs read locks on these transactions
  t1.Resolve(predicate)
  t2.Resolve(predicate)
  # Now this should not raise since t1 and t2 are on different subjects
  t1.Set(predicate, "1")
  t1.Commit()
  t2.Set(predicate, "2")
  t2.Commit()
@TransactionTest
def testRetryWrapper(self):
  """RetryWrapper() retries a locked subject, then raises after 10 tries."""
  subject = "aff4:/subject"
  data_store.DB.DeleteSubject(subject, token=self.token)
  # Remember the retry counter so the delta can be checked below.
  call_count = stats.STATS.GetMetricValue("datastore_retries")

  def MockSleep(_):
    # No-op replacement for time.sleep: retries run back-to-back.
    pass

  def Callback(unused_transaction):
    # Now that we have a transaction, lets try to get another one on the same
    # subject. Since it is locked this should retry.
    try:
      data_store.DB.RetryWrapper(subject, lambda _: None,
                                 token=self.token)
      self.fail("Transaction error not raised.")
    except data_store.TransactionError as e:
      self.assertEqual("Retry number exceeded.", str(e))
      self.assertEqual(
          stats.STATS.GetMetricValue("datastore_retries") - call_count,
          10)

  # By mocking out sleep we can ensure all retries are exhausted.
  with utils.Stubber(time, "sleep", MockSleep):
    data_store.DB.RetryWrapper(subject, Callback, token=self.token)
def testTimestamps(self):
  """Check that timestamps are reasonable."""
  predicate = "metadata:predicate"
  subject = "aff4:test_timestamps"
  # Extend the range of valid timestamps returned from the table to account
  # for potential clock skew. Timestamps are in microseconds.
  start = long(time.time() - 60) * 1e6
  data_store.DB.Set(subject, predicate, "1", token=self.token)
  (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)
  # Check the time is reasonable
  end = long(time.time() + 60) * 1e6
  self.assert_(ts >= start and ts <= end)
  self.assertEqual(stored, "1")
def testSpecificTimestamps(self):
  """Check arbitrary timestamps can be specified."""
  predicate = "metadata:predicate"
  subject = "aff4:/test_specific_timestamps"
  # Check we can specify a timestamp
  data_store.DB.Set(subject, predicate, "2", timestamp=1000, token=self.token)
  (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)
  # The stored timestamp must be exactly the one supplied.
  self.assertEqual(ts, 1000)
  self.assertEqual(stored, "2")
def testNewestTimestamps(self):
  """Check that NEWEST_TIMESTAMP works as expected."""
  predicate1 = "metadata:predicate1"
  predicate2 = "metadata:predicate2"
  subject = "aff4:/test_newest_timestamps"
  # Write two versions of each of two predicates.
  data_store.DB.Set(
      subject, predicate1, "1.1", timestamp=1000, replace=False,
      token=self.token)
  data_store.DB.Set(
      subject, predicate1, "1.2", timestamp=2000, replace=False,
      token=self.token)
  data_store.DB.Set(
      subject, predicate2, "2.1", timestamp=1010, replace=False,
      token=self.token)
  data_store.DB.Set(
      subject, predicate2, "2.2", timestamp=2020, replace=False,
      token=self.token)
  result = data_store.DB.ResolveRegex(
      subject, predicate1, timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token)
  # Should return 2 results. Newest should be first.
  values = [x[1] for x in result]
  self.assertEqual(len(values), 2)
  self.assertListEqual(values, ["1.2", "1.1"])
  times = [x[2] for x in result]
  self.assertListEqual(times, [2000, 1000])
  result = data_store.DB.ResolveRegex(
      subject, predicate1, timestamp=data_store.DB.NEWEST_TIMESTAMP,
      token=self.token)
  # Should return 1 result - the most recent.
  self.assertEqual(len(result), 1)
  self.assertEqual(result[0][1], "1.2")
  self.assertEqual(result[0][2], 2000)
  result = list(data_store.DB.ResolveRegex(
      subject, "metadata:.*", timestamp=data_store.DB.ALL_TIMESTAMPS,
      token=self.token))
  self.assertEqual(len(result), 4)
  self.assertListEqual([r for r in result if r[0] == "metadata:predicate1"], [
      (u"metadata:predicate1", "1.2", 2000),
      (u"metadata:predicate1", "1.1", 1000)])
  self.assertListEqual([r for r in result if r[0] == "metadata:predicate2"], [
      (u"metadata:predicate2", "2.2", 2020),
      (u"metadata:predicate2", "2.1", 1010)])
  result = list(data_store.DB.ResolveRegex(
      subject, "metadata:.*", timestamp=data_store.DB.NEWEST_TIMESTAMP,
      token=self.token))
  # Should only return the latest version.
  self.assertItemsEqual(result, [
      (u"metadata:predicate1", "1.2", 2000),
      (u"metadata:predicate2", "2.2", 2020)])
def testResolveRegEx(self):
"""Test regex Resolving works."""
predicate = "metadata:predicate"
subject = "aff4:/resolve_regex"
# Check we can specify a timestamp
data_store.DB.Set(subject, predicate, "3", timestamp=1000, token=self.token)
results = [x for x in data_store.DB.ResolveRegex(subject, "metadata:pred.*",
timestamp=(0, 2000),
token=self.token)]
self.assertEqual(len(results), 1)
# Timestamp
self.assertEqual(results[0][2], 1000)
# Value
self.assertEqual(results[0][1], "3")
# Predicate
self.assertEqual(results[0][0], predicate)
def testResolveRegExPrefix(self):
"""Test resolving with .* works (basically a prefix search)."""
predicate = "metadata:predicate"
subject = "aff4:/test_resolve_regex_prefix"
# Check we can specify a timestamp
data_store.DB.Set(subject, predicate, "3", token=self.token)
results = [x for x in data_store.DB.ResolveRegex(subject, "metadata:.*",
token=self.token)]
self.assertEqual(len(results), 1)
# Value
self.assertEqual(results[0][1], "3")
# Predicate
self.assertEqual(results[0][0], predicate)
def testResolveMulti(self):
"""Test regex Multi Resolving works."""
subject = "aff4:/resolve_multi"
predicates = []
for i in range(0, 100):
predicate = "metadata:predicate" + str(i)
predicates.append(predicate)
data_store.DB.Set(subject, predicate, "Cell " + predicate, timestamp=1000,
token=self.token)
results = [x for x in data_store.DB.ResolveMulti(subject, predicates,
token=self.token)]
self.assertEqual(len(results), 100)
# Value
for i in range(0, 100):
self.assertEqual(results[i][1], "Cell " + predicates[i])
self.assertEqual(results[i][0], predicates[i])
# Now try to query for non existent predicates.
predicates = predicates[:10]
for i in range(10):
predicates.append("metadata:not_existing" + str(i))
results = [x for x in data_store.DB.ResolveMulti(subject, predicates,
token=self.token)]
self.assertEqual(10, len(results))
for i in range(0, 10):
self.assertEqual(results[i][1], "Cell " + predicates[i])
self.assertEqual(results[i][0], predicates[i])
  def testAFF4Image(self):
    """A HashImage backed by a blob must read back its data verbatim."""
    # 500k
    data = "randomdata" * 50 * 1024

    # Create a blob, addressed by the hex sha256 digest of its contents.
    digest = hashlib.sha256(data).digest()
    urn = aff4.ROOT_URN.Add("blobs").Add(digest.encode("hex"))
    blob_fd = aff4.FACTORY.Create(urn, "AFF4MemoryStream", mode="w",
                                  token=self.token)
    blob_fd.Write(data)
    blob_fd.Close(sync=True)

    # Now create the image containing the blob.
    fd = aff4.FACTORY.Create("aff4:/C.1235/image", "HashImage",
                             token=self.token)
    fd.SetChunksize(512 * 1024)
    fd.Set(fd.Schema.STAT())

    # Reference the blob by digest rather than copying the data in.
    fd.AddBlob(digest, len(data))
    fd.Close(sync=True)

    # Chunks are written async, we have to flush here.
    data_store.DB.Flush()

    # Check if we can read back the data.
    fd = aff4.FACTORY.Open("aff4:/C.1235/image", token=self.token)
    self.assertEqual(fd.read(len(data)), data)
    fd.Close()
def testDotsInDirectory(self):
"""Dots are special in MongoDB, check that they work in rows/indexes."""
for directory in ["aff4:/C.1240/dir",
"aff4:/C.1240/dir/a.b",
"aff4:/C.1240/dir/a.b/c",
"aff4:/C.1240/dir/b"]:
aff4.FACTORY.Create(directory, "VFSDirectory", token=self.token).Close()
# We want the indexes to be written now.
data_store.DB.Flush()
# This must not raise.
aff4.FACTORY.Open("aff4:/C.1240/dir/a.b/c", "VFSDirectory",
token=self.token)
index = data_store.DB.ResolveRegex("aff4:/C.1240/dir",
"index:dir/.+",
token=self.token)
subjects = [s for (s, _, _) in index]
self.assertTrue("index:dir/b" in subjects)
self.assertTrue("index:dir/a.b" in subjects)
directory = aff4.FACTORY.Open("aff4:/C.1240/dir", token=self.token)
self.assertEqual(2, len(list(directory.OpenChildren())))
self.assertEqual(2, len(list(directory.ListChildren())))
OPEN_WITH_LOCK_NUM_THREADS = 10
OPEN_WITH_LOCK_TRIES_PER_THREAD = 3
OPEN_WITH_LOCK_SYNC_LOCK_SLEEP = 0.2
@test_lib.SetLabel("large")
@TransactionTest
def testAFF4OpenWithLock(self):
self.opened = False
self.client_urn = "aff4:/C.0000000000000001"
client = aff4.FACTORY.Create(self.client_urn, "VFSGRRClient", mode="w",
token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Set(
client.Schema.LEASED_UNTIL(
rdfvalue.RDFDatetime().FromSecondsFromEpoch(0)))
client.Close()
self.open_failures = 0
self.close_failures = 0
self.results = []
def ParallelThread():
for _ in xrange(self.OPEN_WITH_LOCK_TRIES_PER_THREAD):
t = time.time()
try:
with aff4.FACTORY.OpenWithLock(
self.client_urn, token=self.token, blocking=True,
blocking_sleep_interval=self.OPEN_WITH_LOCK_SYNC_LOCK_SLEEP,
blocking_lock_timeout=10):
# We fail if another thread has the object already opened here.
if self.opened:
self.open_failures += 1
self.fail("Double open!")
self.opened = True
logging.info("Thread %s holding lock for 0.5 seconds.",
thread.get_ident())
time.sleep(0.5)
# We fail if someone has closed the object while we are holding it
# opened.
if not self.opened:
self.close_failures += 1
self.fail("Double close!")
self.results.append(thread.get_ident())
self.opened = False
return
except aff4.LockError:
logging.info("Lock failed after %s seconds - retying.",
(time.time() - t))
threads = []
for _ in range(self.OPEN_WITH_LOCK_NUM_THREADS):
t = threading.Thread(target=ParallelThread)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(self.open_failures, 0)
self.assertEqual(self.close_failures, 0)
# Make sure all threads got it eventually.
self.assertEqual(len(self.results), self.OPEN_WITH_LOCK_NUM_THREADS)
def _InstallACLChecks(self, forbidden_access):
if self.acls_installed:
raise RuntimeError("Seems like _InstallACLChecks was called twice in one "
"test")
self.acls_installed = True
data_store.DB.security_manager = test_lib.MockSecurityManager(
forbidden_datastore_access=forbidden_access)
def _ListedMultiResolveRegex(self, *args, **kwargs):
return list(data_store.DB.MultiResolveRegex(*args, **kwargs))
def _ListedResolveMulti(self, *args, **kwargs):
return list(data_store.DB.ResolveMulti(*args, **kwargs))
def _ListedResolveRegex(self, *args, **kwargs):
return list(data_store.DB.ResolveRegex(*args, **kwargs))
  def _FlushedDeleteSubject(self, *args, **kwargs):
    """Delete a subject and force the deletion to be committed."""
    # DeleteSubject is not guaranteed to be synchronous. Make sure that
    # we flush data store when testing it.
    data_store.DB.DeleteSubject(*args, **kwargs)
    data_store.DB.Flush()
def testSetChecksWriteAccess(self):
self._InstallACLChecks("w")
self.assertRaises(
access_control.UnauthorizedAccess,
data_store.DB.Set,
self.test_row, "task:00000001", rdfvalue.GrrMessage(), token=self.token)
@DeletionTest
def testDeleteSubjectChecksWriteAccess(self):
self._InstallACLChecks("w")
self.assertRaises(
access_control.UnauthorizedAccess,
self._FlushedDeleteSubject,
self.test_row, token=self.token)
def testMultiSetChecksWriteAccess(self):
self._InstallACLChecks("w")
self.assertRaises(
access_control.UnauthorizedAccess,
data_store.DB.MultiSet,
self.test_row, {"aff4:size": [(1, 100)],
"aff4:stored": [("foo", 200)]},
token=self.token)
@DeletionTest
def testDeleteAttributesChecksWriteAccess(self):
self._InstallACLChecks("w")
self.assertRaises(
access_control.UnauthorizedAccess,
data_store.DB.DeleteAttributes,
self.test_row, ["metadata:predicate"], sync=True, token=self.token)
def testMultiResolveRegexChecksReadAccess(self):
self._InstallACLChecks("r")
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedMultiResolveRegex,
[self.test_row], ["task:.*"], token=self.token)
def testMultiResolveRegexChecksQueryAccessWhenAccessingIndex(self):
self._InstallACLChecks("q")
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedMultiResolveRegex,
[self.test_row], ["index:.*"], token=self.token)
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedMultiResolveRegex,
[self.test_row], ["task:.*", "index:.*"], token=self.token)
# Check that simple resolve doesn't require query access.
self._ListedMultiResolveRegex(
[self.test_row], ["task:.*"], token=self.token)
def testResolveMultiChecksReadAccess(self):
self._InstallACLChecks("r")
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedResolveMulti,
self.test_row, ["task:000000001"], token=self.token)
def testResolveMultiChecksQueryAccessWhenAccessingIndex(self):
self._InstallACLChecks("q")
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedResolveMulti,
self.test_row, ["index:dir/foo"], token=self.token)
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedResolveMulti,
self.test_row, ["task:00000001", "index:dir/foo"], token=self.token)
# Check that simple resolve doesn't require query access.
self._ListedResolveMulti(
self.test_row, ["task:00000001"], token=self.token)
def testResolveRegexChecksReadAccess(self):
self._InstallACLChecks("r")
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedResolveRegex,
self.test_row, "task:.*", token=self.token)
def testResolveRegexChecksQueryAccessWhenAccessingIndex(self):
self._InstallACLChecks("q")
self.assertRaises(
access_control.UnauthorizedAccess,
self._ListedResolveRegex,
self.test_row, "index:.*", token=self.token)
# Check that simple resolve doesn't require query access.
self._ListedResolveRegex(
self.test_row, "task:.*", token=self.token)
def testResolveChecksReadAccess(self):
self._InstallACLChecks("r")
self.assertRaises(
access_control.UnauthorizedAccess,
data_store.DB.Resolve,
self.test_row, "task:000000001", token=self.token)
def testResolveChecksQueryAccessWhenAccessingIndex(self):
self._InstallACLChecks("q")
self.assertRaises(
access_control.UnauthorizedAccess,
data_store.DB.Resolve,
self.test_row, "index:dir/foo", token=self.token)
# Check that simple resolve doesn't require query access.
data_store.DB.Resolve(
self.test_row, "task:00000001", token=self.token)
def testLimits(self):
# Create 10 rows with 10 attributes each.
subjects = ["aff4:limittest_%d" % i for i in xrange(10)]
attributes = ["metadata:limittest_%d" % i for i in xrange(10)]
value_idx = 0
for subject in subjects:
for attribute in attributes:
value = "value_%d" % value_idx
value_idx += 1
data_store.DB.Set(subject, attribute, value, token=self.token)
# ResolveRegex.
for limit in [1, 2, 5, 10, 100]:
results = data_store.DB.ResolveRegex(
subjects[0], "metadata:.*", limit=limit, token=self.token)
self.assertEqual(len(results), min(limit, 10))
# MultiResolveRegex.
for limit in [1, 2, 5, 9, 10, 11, 25, 100, 120]:
results = dict(data_store.DB.MultiResolveRegex(
subjects, "metadata:.*", limit=limit, token=self.token))
all_results = []
for subect_res in results.itervalues():
all_results.extend(subect_res)
self.assertEqual(len(all_results), min(limit, 100))
for limit in [1, 2, 5, 9, 10, 11, 25]:
results = dict(data_store.DB.MultiResolveRegex(
subjects, "metadata:limittest_7", limit=limit, token=self.token))
all_results = []
for subect_res in results.itervalues():
all_results.extend(subect_res)
self.assertEqual(len(all_results), min(limit, 10))
# ResolveMulti.
for limit in [1, 2, 5, 9, 10, 11, 25]:
results = list(data_store.DB.ResolveMulti(
subjects[2], attributes, limit=limit, token=self.token))
self.assertEqual(len(results), min(limit, 10))
def testApi(self):
api = ["DeleteAttributes",
"DeleteSubject",
"MultiResolveRegex",
"MultiSet",
"Resolve",
"ResolveMulti",
"ResolveRegex",
"Set",
"Transaction"]
implementation = data_store.DB
reference = data_store.DataStore
for f in api:
implementation_spec = inspect.getargspec(getattr(implementation, f))
reference_spec = inspect.getargspec(getattr(reference, f))
self.assertEqual(
implementation_spec, reference_spec,
"Signatures for function %s not matching: \n%s !=\n%s" %(
f, implementation_spec, reference_spec))
class DataStoreCSVBenchmarks(test_lib.MicroBenchmarks):
  """Long running benchmarks where the results are dumped to a CSV file.

  These tests are deliberately not named with the test prefix, since they need
  to be run individually to get true performance data. Run by specifying the
  testname with --test and setting --labels=benchmark.

  The CSV output filename will be printed in a log message at the end of the
  test.
  """

  # What we consider as a big number of attributes.
  BIG_NUM_ATTRIBUTES = 1000

  units = "s"

  # Database counters. These mirror what the benchmark believes is stored in
  # the database; they are reported in each CSV row.
  subjects = 0
  predicates = 0
  values = 0
  queries_total = 0  # Total queries.
  queries_last_timestep = 0  # Number of the queries up to the last timestep.
  steps = 0  # How many steps so far.
  query_interval = 3000  # A step is composed of this many queries.

  test_name = ""  # Current operation being run.
  start_time = None
  last_time = None
  predicate_template = "task:flow%d"

  def setUp(self):
    # Column headers and format widths for the MicroBenchmarks scratchpad.
    super(DataStoreCSVBenchmarks, self).setUp(["DB Size (KB)", "Queries",
                                               "Subjects", "Predicates",
                                               "Values"],
                                              ["<20", "<10", "<10", "<10",
                                               "<10"])
    self.InitDatastore()
    self.start_time = time.time()
    self.last_time = self.start_time

  def tearDown(self):
    # Emit one final result row before writing the CSV and tearing down.
    self.Register(force=True)
    super(DataStoreCSVBenchmarks, self).tearDown()
    self.WriteCSV()
    self.DestroyDatastore()

  def Register(self, force=False):
    """Add a new result line to the benchmark result."""
    self.queries_total += 1
    # Only record a row every query_interval queries (or when forced).
    if self.queries_total % self.query_interval == 0 or force:
      data_store.DB.Flush()
      this_time = time.time()
      queries_diff = self.queries_total - self.queries_last_timestep
      self.queries_last_timestep = self.queries_total
      self.last_time = this_time
      self.steps += 1
      self.AddResult(self.test_name, this_time - self.start_time, self.steps,
                     data_store.DB.Size() / 1024, queries_diff, self.subjects,
                     self.predicates, self.values)

  def WriteCSV(self, remove=False):
    """Write results to a CSV file.

    Args:
      remove: If True, delete the CSV file after writing (used for cleanup).
    """
    with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as fp:
      writer = csv.writer(fp, delimiter=" ")
      writer.writerow(["Benchmark", "Time", "DBSize", "Queries", "Subjects",
                       "Predicates", "Values"])
      # NOTE(review): the first two scratchpad rows are skipped — presumably
      # header rows added by MicroBenchmarks; confirm against the base class.
      for row in self.scratchpad[2:]:
        writer.writerow([row[0], row[1], row[3], row[4], row[5],
                         row[6], row[7]])
      logging.info("CSV File is in %s", fp.name)
    if remove:
      os.unlink(fp.name)

  def _RandomlyReadSubject(self, subject, predicates):
    """Read certain parts of a given subject."""
    for j, timestamps in predicates.items():
      # Randomly pick one of three read styles per predicate.
      which = self.rand.randint(0, 2)
      if which == 0:
        # Read all timestamps.
        data_store.DB.ResolveRegex(subject, self.predicate_template % j,
                                   timestamp=data_store.DB.ALL_TIMESTAMPS,
                                   token=self.token)
      elif which == 1:
        # Read a specific timestamp.
        if timestamps:
          ts = self.rand.choice(timestamps)
          data_store.DB.ResolveRegex(subject, self.predicate_template % j,
                                     timestamp=(ts, ts), token=self.token)
      elif which == 2:
        # Read latest.
        data_store.DB.Resolve(subject, self.predicate_template % j,
                              token=self.token)
      self.Register()
    if self.rand.randint(0, 1) == 0:
      # Find all attributes.
      data_store.DB.ResolveRegex(subject, "task:flow.*",
                                 timestamp=data_store.DB.NEWEST_TIMESTAMP,
                                 token=self.token)
      self.Register()

  def _ReadRandom(self, subjects, fraction, change_test=True):
    """Randomly read the database."""
    if change_test:
      self.test_name = "read random %d%%" % fraction
    # Sample (with replacement) fraction% of the subjects and read them.
    for _ in range(0, int(float(len(subjects)) * float(fraction) / 100.0)):
      i = self.rand.choice(subjects.keys())
      subject = subjects[i]["name"]
      predicates = subjects[i]["attrs"]
      self._RandomlyReadSubject(subject, predicates)

  def _UpdateRandom(self, subjects, fraction, change_test=True):
    """Update values/predicates for a given fraction of the subjects."""
    if change_test:
      self.test_name = "update %d%%" % fraction
    new_value = os.urandom(100)
    for i in subjects:
      subject = subjects[i]["name"]
      predicates = subjects[i]["attrs"]
      if self.rand.randint(0, 100) > fraction:
        continue
      # 0/1: touch existing predicates; 2: add a brand-new predicate.
      which = self.rand.randint(0, 2)
      if which == 0 or which == 1:
        for j, timestamp_info in predicates.items():
          number_timestamps = len(timestamp_info)
          if which == 0 and len(timestamp_info):
            # Update one timestamp'ed value.
            data_store.DB.Set(subject, self.predicate_template % j, new_value,
                              timestamp=timestamp_info[-1], token=self.token)
            self.Register()
          elif which == 1:
            # Add another timestamp.
            timestamp_info.append(100 * number_timestamps + 1)
            data_store.DB.Set(subject, self.predicate_template % j, new_value,
                              timestamp=timestamp_info[-1], token=self.token)
            self.values += 1
            self.Register()
      elif which == 2:
        # Add an extra predicate.
        j = len(predicates)
        number_timestamps = self.rand.randrange(1, 3)
        ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
        predicates[j] = ts
        self.values += number_timestamps
        self.predicates += 1
        values = [(new_value, t) for t in ts]
        data_store.DB.MultiSet(subject, {self.predicate_template % j: values},
                               timestamp=100, token=self.token)
        self.Register()
    data_store.DB.Flush()

  def _DeleteRandom(self, subjects, fraction, change_test=True):
    """Delete predicates/subjects/values at random."""
    if change_test:
      self.test_name = "delete %d%%" % fraction
    subjects_to_delete = []
    for i, info in subjects.items():
      subject = info["name"]
      predicates = info["attrs"]
      number_predicates = len(predicates)
      # do_it decides whether this subject is touched; which picks the kind of
      # deletion (0: one value, 1: whole attribute, 2: whole subject).
      do_it = (self.rand.randint(0, 100) <= fraction)
      which = self.rand.randint(0, 2)
      count_values = 0
      predicates_to_delete = []
      for j, timestamp_info in predicates.items():
        number_timestamps = len(timestamp_info)
        count_values += number_timestamps
        if do_it:
          if which == 0:
            # Delete one timestamp'ed value.
            if timestamp_info:
              ts = timestamp_info[0]
              data_store.DB.DeleteAttributes(subject,
                                             [self.predicate_template % j],
                                             start=ts, end=ts,
                                             token=self.token)
              self.values -= 1
              timestamp_info.pop(0)
              self.Register()
            else:
              # No values left; fall through to deleting the attribute.
              which = 1
          if which == 1:
            # Delete the attribute itself.
            data_store.DB.DeleteAttributes(subject,
                                           [self.predicate_template % j],
                                           token=self.token)
            self.values -= number_timestamps
            self.predicates -= 1
            predicates_to_delete.append(j)
            self.Register()
      if do_it and which == 1:
        for j in predicates_to_delete:
          del predicates[j]
      if do_it and which == 2:
        # Delete subject.
        data_store.DB.DeleteSubject(subject, token=self.token)
        self.predicates -= number_predicates
        self.values -= count_values
        self.subjects -= 1
        subjects_to_delete.append(i)
        self.Register()
    for i in subjects_to_delete:
      del subjects[i]
    data_store.DB.Flush()

  def _GrowRandomly(self, subjects, fraction, nclients, change_test=True):
    """Adds new clients/subjects to the database."""
    if change_test:
      self.test_name = "add %d%%" % fraction
    how_many = int(float(len(subjects)) * float(fraction) / 100)
    new_value = os.urandom(100)
    # Continue numbering after the current largest subject key.
    new_subject = max(subjects.iteritems(),
                      key=operator.itemgetter(0))[0] + 1
    # Generate client names.
    clients = [self._GenerateRandomClient() for _ in xrange(nclients)]
    for i in xrange(new_subject, new_subject + how_many):
      client = clients[self.rand.randint(0, nclients - 1)]
      self._AddNewSubject(client, subjects, i, new_value)
    data_store.DB.Flush()

  def _GenerateRandomSubject(self):
    # A random path of 1-5 components, each 5-10 characters long.
    n = self.rand.randint(1, 5)
    seps = [self._GenerateRandomString(self.rand.randint(5, 10))
            for _ in xrange(n)]
    return "/".join(seps)

  def _AddNewSubject(self, client, subjects, i, value, max_attributes=3):
    """Add a new subject to the database."""
    number_predicates = self.rand.randrange(1, max_attributes)
    self.subjects += 1
    predicates = dict.fromkeys(xrange(number_predicates))
    self.predicates += number_predicates
    subject = str(client.Add(self._GenerateRandomSubject()))
    for j in xrange(number_predicates):
      number_timestamps = self.rand.randrange(1, 3)
      self.values += number_timestamps
      ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
      predicates[j] = ts
      values = [(value, t) for t in ts]
      data_store.DB.MultiSet(subject, {self.predicate_template % j: values},
                             timestamp=100, sync=False, token=self.token)
      self.Register()
    # Book-keeping entry consumed by the other _*Random helpers.
    info = {"name": subject, "attrs": predicates}
    subjects[i] = info

  def _ReadLinear(self, subjects, fraction):
    """Linearly read subjects from the database."""
    self.test_name = "read linear %d%%" % fraction
    for i in subjects:
      # NOTE(review): `return` stops the whole scan at the first skipped
      # subject rather than skipping it (`continue`) — confirm this is the
      # intended sampling behavior.
      if self.rand.randint(0, 100) > fraction:
        return
      subject = subjects[i]["name"]
      predicates = subjects[i]["attrs"]
      self._RandomlyReadSubject(subject, predicates)

  def _AddManyAttributes(self, subjects, many):
    """Add lots of predicates to a given number of subjects."""
    self.test_name = "add +attrs %d" % many
    new_value = os.urandom(100)
    for _ in range(0, many):
      i = self.rand.choice(subjects.keys())
      subject = subjects[i]["name"]
      predicates = subjects[i]["attrs"]
      how_many = self.rand.randint(self.BIG_NUM_ATTRIBUTES,
                                   self.BIG_NUM_ATTRIBUTES + 1000)
      self.predicates += how_many
      # Continue predicate numbering after the current largest key.
      new_predicate = max(predicates.iteritems(),
                          key=operator.itemgetter(0))[0] + 1
      for j in xrange(new_predicate, new_predicate + how_many):
        number_timestamps = self.rand.randrange(1, 3)
        ts = [100 * (ts + 1) for ts in xrange(number_timestamps)]
        self.values += number_timestamps
        values = [(new_value, t) for t in ts]
        predicates[j] = ts
        data_store.DB.MultiSet(subject, {self.predicate_template % j: values},
                               timestamp=100, sync=False, token=self.token)
        self.Register()
    data_store.DB.Flush()

  def _RemoveManyAttributes(self, subjects, fraction):
    """Delete all predicates (except 1) from subjects with many predicates."""
    self.test_name = "del +attrs %d%%" % fraction
    # Python 2 integer division: every often-th qualifying subject is hit.
    often = 100 / fraction
    count = 0
    for i in subjects:
      subject = subjects[i]["name"]
      predicates = subjects[i]["attrs"]
      number_predicates = len(predicates)
      if number_predicates >= self.BIG_NUM_ATTRIBUTES:
        count += 1
        if count == often:
          count = 0
          # Keep the first predicate, delete everything else.
          predicates_to_delete = [j for j in predicates.keys()[1:]]
          values_deleted = sum(len(predicates[x])
                               for x in predicates_to_delete)
          self.values -= values_deleted
          self.predicates -= len(predicates_to_delete)
          for j in predicates_to_delete:
            del predicates[j]
            data_store.DB.DeleteAttributes(subject,
                                           [self.predicate_template % j],
                                           sync=False, token=self.token)
            self.Register()
    data_store.DB.Flush()

  def _Wipeout(self, subjects):
    """Delete every subject from the database."""
    self.test_name = "wipeout"
    for i in subjects:
      subject = subjects[i]["name"]
      predicates = subjects[i]["attrs"]
      number_predicates = len(predicates)
      count_values = 0
      for j in predicates:
        count_values += len(predicates[j])
      data_store.DB.DeleteSubject(subject, token=self.token)
      self.predicates -= number_predicates
      self.values -= count_values
      self.subjects -= 1
      self.Register()
    # NOTE(review): rebinding only clears the local name; the caller's dict
    # still holds the (now deleted) entries.
    subjects = {}
    data_store.DB.Flush()

  def _DoMix(self, subjects):
    """Do a mix of database operations."""
    self.test_name = "mix"
    for _ in xrange(0, len(subjects) / 2000):
      # Do random operations.
      op = self.rand.randint(0, 3)
      if op == 0:
        self._ReadRandom(subjects, 14, False)
      elif op == 1:
        self._GrowRandomly(subjects, 5, 20, False)
      elif op == 2:
        self._UpdateRandom(subjects, 10, False)
      elif op == 3:
        self._DeleteRandom(subjects, 4, False)

  def _GenerateRandomClient(self):
    # A random client URN of the canonical "C.<16 digits>" form.
    return rdfvalue.ClientURN("C.%016d" % self.rand.randint(0, (10 ** 16) - 1))

  def _FillDatabase(self, nsubjects, nclients,
                    max_attributes=3):
    """Fill the database with a certain number of subjects and clients."""
    # Fixed seed so every benchmark run generates the same workload.
    self.rand = random.Random(0)
    self.test_name = "fill"
    self.AddResult(self.test_name, 0, self.steps, data_store.DB.Size(),
                   0, 0, 0, 0)
    subjects = dict.fromkeys(xrange(nsubjects))
    value = os.urandom(100)
    clients = [self._GenerateRandomClient() for _ in xrange(nclients)]
    for i in subjects:
      client = self.rand.choice(clients)
      self._AddNewSubject(client, subjects, i, value, max_attributes)
    data_store.DB.Flush()
    return subjects

  def _GenerateRandomString(self, chars):
    # Random ASCII-letter string of the requested length.
    return "".join([self.rand.choice(string.ascii_letters)
                    for _ in xrange(chars)])

  def _AddBlobs(self, howmany, size):
    """Adds 'howmany' blobs with size 'size' kbs."""
    self.test_name = "add blobs %dx%dk" % (howmany, size)
    blob_info = []
    count = 0
    # NOTE(review): integer division — zero if howmany < 10, which would make
    # the modulo below raise; callers currently pass howmany >= 20.
    often = howmany / 10
    for count in xrange(howmany):
      data = self._GenerateRandomString(1024 * size)
      # Create a blob.
      digest = hashlib.sha256(data).digest()
      code = digest.encode("hex")
      urn = aff4.ROOT_URN.Add("blobs").Add(code)
      blob_fd = aff4.FACTORY.Create(urn, "AFF4MemoryStream", mode="w",
                                    token=self.token)
      blob_fd.Write(data)
      blob_fd.Close(sync=True)
      blob_info.append(urn)
      if count % often == 0:
        # Because adding blobs, takes too long we force the output of
        # new results.
        self.Register(force=True)
    self.Register(force=True)
    data_store.DB.Flush()
    return blob_info

  @test_lib.SetLabel("benchmark")
  def manySubjectsFewAttrs(self):
    """Database with many subjects with few attributes."""
    subjects = self._FillDatabase(25000, 500)
    self._ReadLinear(subjects, 50)
    self._UpdateRandom(subjects, 50)
    self._ReadRandom(subjects, 70)
    self._DeleteRandom(subjects, 40)
    self._GrowRandomly(subjects, 40, 50)
    self._ReadRandom(subjects, 100)
    self._DoMix(subjects)
    self._Wipeout(subjects)

  @test_lib.SetLabel("benchmark")
  def manySubjectsFewWithManyAttrs(self):
    """Database where a few subjects have many attributes."""
    subjects = self._FillDatabase(25000, 500)
    self._UpdateRandom(subjects, 50)
    self._AddManyAttributes(subjects, 100)
    self._ReadRandom(subjects, 30)
    # For 1/2 of the subjects with many attributes, remove all but
    # one of the attributes.
    self._RemoveManyAttributes(subjects, 50)
    self._ReadRandom(subjects, 30)
    self._UpdateRandom(subjects, 50)
    self._Wipeout(subjects)

  @test_lib.SetLabel("benchmark")
  def fewSubjectsManyAttrs(self):
    """Database with a few subjects with many attributes."""
    subjects = self._FillDatabase(100, 5)
    self._UpdateRandom(subjects, 100)
    self._AddManyAttributes(subjects, 50)
    self._ReadRandom(subjects, 30)
    self._RemoveManyAttributes(subjects, 50)
    self._ReadRandom(subjects, 50)
    self._Wipeout(subjects)

  @test_lib.SetLabel("benchmark")
  def blobs(self):
    """Database that stores blobs of increasing size."""
    subjects = self._FillDatabase(10000, 200)

    def _ReadUpdate():
      # Interleave reads/updates between blob batches to measure interference.
      self._ReadRandom(subjects, 75)
      self._UpdateRandom(subjects, 20)

    _ReadUpdate()
    self._AddBlobs(50, 512)
    _ReadUpdate()
    self._AddBlobs(50, 2048)
    _ReadUpdate()
    self._AddBlobs(50, 10240)
    _ReadUpdate()
    self._AddBlobs(20, 10240 * 10)
    _ReadUpdate()

  @test_lib.SetLabel("benchmark")
  def manySubjectsManyAttrs(self):
    """Database with many subjects with many attributes."""
    subjects = self._FillDatabase(25000, 500, 50)
    self._ReadLinear(subjects, 50)
    self._UpdateRandom(subjects, 50)
    self._ReadRandom(subjects, 50)
    self._DeleteRandom(subjects, 40)
    self._GrowRandomly(subjects, 40, 50)
    self._ReadRandom(subjects, 50)
    self._DoMix(subjects)
    self._Wipeout(subjects)
class DataStoreBenchmarks(test_lib.MicroBenchmarks):
  """Datastore micro benchmarks.

  These tests should be run with --labels=benchmark
  """
  # Queue the simulated flows are scheduled on.
  queue = rdfvalue.RDFURN("BENCHMARK")
  # Unit reported to the benchmark framework (overridden per test).
  units = "s"
  def setUp(self):
    """Set up a fresh data store and a 50-thread worker pool."""
    super(DataStoreBenchmarks, self).setUp()
    self.InitDatastore()
    self.tp = threadpool.ThreadPool.Factory("test_pool", 50)
    self.tp.Start()
  def tearDown(self):
    """Stop the thread pool and dispose of the data store."""
    super(DataStoreBenchmarks, self).tearDown()
    self.tp.Stop()
    self.DestroyDatastore()
  def InitDatastore(self):
    """Initiates custom data store."""
    # Intentionally empty: subclasses override to set up their own store.
  def DestroyDatastore(self):
    """Destroys custom data store."""
    # Intentionally empty: subclasses override to tear down their own store.
  def GenerateFiles(self, client_id, n, directory="dir/dir"):
    """Return n synthetic StatEntry responses for files under `directory`."""
    res = []
    for i in xrange(n):
      res.append(rdfvalue.StatEntry(
          aff4path="aff4:/%s/fs/os/%s/file%d" % (client_id, directory, i),
          st_mode=33261,  # 0100755 octal: regular file, mode 0755.
          st_ino=1026267,
          st_dev=51713,
          st_nlink=1,
          st_uid=0,
          st_gid=0,
          st_size=60064,
          st_atime=1308964274,
          st_mtime=1285093975,
          st_ctime=1299502221,
          st_blocks=128,
          st_blksize=4096,
          st_rdev=0,
          # NOTE(review): the pathspec hard-codes "/dir/dir" and ignores the
          # `directory` argument used in aff4path — confirm this mismatch is
          # irrelevant for the benchmark.
          pathspec=rdfvalue.PathSpec(path="/dir/dir/file%d" % i,
                                     pathtype=0)))
    return res
  def StartFlow(self, client_id):
    """Start a ListDirectory flow for the client and queue fake responses."""
    flow_id = flow.GRRFlow.StartFlow(client_id=client_id,
                                     flow_name="ListDirectory",
                                     queue=self.queue,
                                     pathspec=rdfvalue.PathSpec(
                                         path="/",
                                         pathtype="OS",
                                     ),
                                     token=self.token)
    self.flow_ids.append(flow_id)

    # One batch of fake file results per directory, then a terminating status.
    messages = []
    for d in range(self.nr_dirs):
      messages += self.GenerateFiles(client_id, self.files_per_dir,
                                     "dir/dir%d" % d)
    messages.append(rdfvalue.GrrStatus())

    with queue_manager.QueueManager(token=self.token) as flow_manager:
      for i, payload in enumerate(messages):
        msg = rdfvalue.GrrMessage(
            session_id=flow_id,
            request_id=1, response_id=1 + i,
            auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
            payload=payload)
        # Mark the trailing GrrStatus message as a status response.
        if isinstance(payload, rdfvalue.GrrStatus):
          msg.type = 1
        flow_manager.QueueResponse(flow_id, msg)
nr_clients = 4
nr_dirs = 4
files_per_dir = 500
  @test_lib.SetLabel("benchmark")
  def testSimulateFlows(self):
    """Time generating flow messages, then processing them with a worker."""
    self.flow_ids = []
    self.units = "s"

    client_ids = ["C.%016X" % j for j in range(1, self.nr_clients + 1)]

    # Phase 1: fan StartFlow out over the thread pool and time it.
    start_time = time.time()

    for client_id in client_ids:
      self.tp.AddTask(self.StartFlow, (client_id,))
    self.tp.Join()

    notifications = [
        rdfvalue.GrrNotification(session_id=f) for f in self.flow_ids]
    with queue_manager.QueueManager(sync=True, token=self.token) as manager:
      manager.MultiNotifyQueue(notifications)

    time_used = time.time() - start_time

    self.AddResult("Generate Messages (%d clients, %d files)" % (
        self.nr_clients, self.nr_dirs * self.files_per_dir), time_used, 1)

    # Phase 2: drain the queue with a worker and time the processing.
    my_worker = worker.GRRWorker(queues=[self.queue], token=self.token)

    start_time = time.time()

    while my_worker.RunOnce():
      pass
    my_worker.thread_pool.Join()

    time_used = time.time() - start_time

    self.AddResult("Process Messages", time_used, 1)
  @test_lib.SetLabel("benchmark")
  def testMicroBenchmarks(self):
    """Run all micro benchmarks from one test in a fixed order."""

    # Tests run in arbitrary order but for the benchmarks, the order makes a
    # difference so we call them all from one test here.
    self.n = 1000
    self.small_n = self.n / 100  # Smaller count for the expensive cases.
    self.units = "ms"

    self.BenchmarkWriting()
    self.BenchmarkReading()

    self.BenchmarkWritingThreaded()
    self.BenchmarkReadingThreaded()

    self.BenchmarkAFF4Locks()
  def BenchmarkWriting(self):
    """Time single-threaded Set() in four patterns.

    Measured: one attribute across many rows, many attributes on one row,
    many versions of one attribute, and large (10MB) values.
    """
    subject_template = "aff4:/row%d"
    predicate_template = "task:flow%d"
    value = os.urandom(100)
    large_value = os.urandom(10 * 1024 * 1024)

    # One attribute written to self.n distinct rows.
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Set(subject_template % i, "task:flow", value,
                        token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set rows", (end_time - start_time) / self.n, self.n)

    # self.n distinct attributes written to a single row.
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Set("aff4:/somerow", predicate_template % i,
                        value, token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set attributes", (end_time - start_time) / self.n, self.n)

    # self.n versions of one attribute (replace=False keeps all versions).
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Set("aff4:/somerow", "task:someflow", value,
                        replace=False, token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set versions", (end_time - start_time) / self.n, self.n)

    # 10MB values; only small_n iterations to keep the runtime bounded.
    start_time = time.time()
    for i in xrange(self.small_n):
      data_store.DB.Set("aff4:/largerow%d" % i, "task:largeflow",
                        large_value, replace=False, token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Set large values", (end_time - start_time) / self.small_n,
                   self.small_n)
  def BenchmarkReading(self):
    """Time single-threaded reads mirroring the patterns of BenchmarkWriting."""
    subject_template = "aff4:/row%d"
    predicate_template = "task:flow%d"

    # Read one attribute from each of self.n rows.
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Resolve(subject_template % i, "task:flow", token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get rows", (end_time - start_time) / self.n, self.n)

    # Read self.n distinct attributes from a single row.
    start_time = time.time()
    for i in xrange(self.n):
      data_store.DB.Resolve("aff4:/somerow", predicate_template % i,
                            token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get attributes", (end_time - start_time) / self.n, self.n)

    # Read all versions of one heavily-versioned attribute.
    start_time = time.time()
    for i in xrange(self.small_n):
      data_store.DB.ResolveRegex("aff4:/somerow", "task:someflow",
                                 timestamp=data_store.DB.ALL_TIMESTAMPS,
                                 token=self.token)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get all versions", (end_time - start_time) / self.small_n,
                   self.small_n)

    # Read back the 10MB values and verify their size.
    start_time = time.time()
    for i in xrange(self.small_n):
      res = data_store.DB.ResolveRegex("aff4:/largerow%d" % i, "task:largeflow",
                                       timestamp=data_store.DB.ALL_TIMESTAMPS,
                                       token=self.token)
      self.assertEqual(len(res), 1)
      self.assertEqual(len(res[0][1]), 10 * 1024 * 1024)
    data_store.DB.Flush()
    end_time = time.time()

    self.AddResult("Get large values", (end_time - start_time) / self.small_n,
                   self.small_n)
def BenchmarkWritingThreaded(self):
  """Benchmark write paths via the shared thread pool self.tp.

  Mirrors the single-threaded write benchmark but queues each Set on the
  pool; elapsed time covers queueing, execution (Join) and the final
  Flush.  Positional Set args appear to be (subject, attribute, value,
  timestamp, token[, replace]) -- confirm against data_store.DB.Set.
  """
  subject_template = "aff4:/threadedrow%d"
  predicate_template = "task:threadedflow%d"
  value = os.urandom(100)
  large_value = os.urandom(10 * 1024 * 1024)
  # Phase 1: one small value per distinct subject.
  start_time = time.time()
  for i in xrange(self.n):
    self.tp.AddTask(data_store.DB.Set, (subject_template % i,
                                        "task:threadedflow",
                                        value, None, self.token))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Set rows",
                 (end_time - start_time) / self.n, self.n)
  # Phase 2: many attributes on a single subject.
  start_time = time.time()
  for i in xrange(self.n):
    self.tp.AddTask(data_store.DB.Set, ("aff4:/somerowthreaded",
                                        predicate_template % i,
                                        value, None, self.token))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Set attributes",
                 (end_time - start_time) / self.n, self.n)
  # Phase 3: repeated writes of one attribute with replace=False,
  # creating a new version per write.
  start_time = time.time()
  for i in xrange(self.n):
    self.tp.AddTask(data_store.DB.Set, ("aff4:/somerowthreaded",
                                        "task:someflowthreaded",
                                        value, None,
                                        self.token, False))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Set versions",
                 (end_time - start_time) / self.n, self.n)
  # Phase 4: 10 MiB values, fewer iterations (small_n).
  start_time = time.time()
  for i in xrange(self.small_n):
    self.tp.AddTask(data_store.DB.Set, ("aff4:/threadedlargerow%d" % i,
                                        "task:largeflowthreaded", large_value,
                                        None, self.token, False))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Set large values",
                 (end_time - start_time) / self.small_n, self.small_n)
def ResolveRegexAndCheck(self, subject, predicate, expected_items=1000):
  """Resolve all timestamped values for (subject, predicate), assert count."""
  matches = list(
      data_store.DB.ResolveRegex(
          subject, predicate, token=self.token,
          timestamp=data_store.DB.ALL_TIMESTAMPS))
  self.assertEqual(len(matches), expected_items)
def BenchmarkReadingThreaded(self):
  """Benchmark read paths via the shared thread pool self.tp.

  Mirrors BenchmarkReading but queues each read on the pool; elapsed time
  covers queueing plus completion (Join) and the final Flush.
  """
  subject_template = "aff4:/threadedrow%d"
  predicate_template = "task:threadedflow%d"
  # Phase 1: one Resolve per distinct subject row.
  start_time = time.time()
  for i in xrange(self.n):
    self.tp.AddTask(data_store.DB.Resolve, (
        subject_template % i, "task:threadedflow", self.token))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Get rows",
                 (end_time - start_time) / self.n, self.n)
  # Phase 2: many attributes on one subject.
  start_time = time.time()
  for i in xrange(self.n):
    self.tp.AddTask(data_store.DB.Resolve, (
        "aff4:/somerowthreaded", predicate_template % i, self.token))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Get attributes",
                 (end_time - start_time) / self.n, self.n)
  # Phase 3: all versions of one attribute; default expected_items=1000
  # presumably matches the count written by the write benchmark -- verify.
  start_time = time.time()
  for i in xrange(self.small_n):
    self.tp.AddTask(self.ResolveRegexAndCheck, ("aff4:/somerowthreaded",
                                                "task:someflowthreaded"))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Get all versions",
                 (end_time - start_time) / self.small_n, self.small_n)
  # Phase 4: large values, exactly one version each.
  start_time = time.time()
  for i in xrange(self.small_n):
    self.tp.AddTask(self.ResolveRegexAndCheck, (
        "aff4:/threadedlargerow%d" % i,
        "task:largeflowthreaded", 1))
  self.tp.Join()
  data_store.DB.Flush()
  end_time = time.time()
  self.AddResult("Multithreaded: Get large values",
                 (end_time - start_time) / self.small_n, self.small_n)
def BenchmarkAFF4Locks(self):
  """Benchmark aff4.FACTORY.OpenWithLock, serially and on the thread pool.

  Creates one client object, then repeatedly opens it under a blocking
  lock; exceptions raised inside worker threads are collected in
  self.fails and asserted empty after each phase.
  """
  self.client_id = "C.%016X" % 999
  # Write some data to read.
  client = aff4.FACTORY.Create(self.client_id, "VFSGRRClient", mode="w",
                               token=self.token)
  client.Set(client.Schema.HOSTNAME("client1"))
  client.Close()
  cl = aff4.FACTORY.Open(self.client_id, token=self.token)
  self.assertEqual(cl.Get(cl.Schema.HOSTNAME), "client1")
  # Collect exceptions in threads (assertions don't propagate from the pool).
  self.fails = []
  def Thread():
    try:
      # Using blocking_lock_timeout of 10 minutes to avoid possible
      # timeouts when running tests on slow hardware.
      with aff4.FACTORY.OpenWithLock(self.client_id, token=self.token,
                                     blocking=True,
                                     blocking_sleep_interval=0.2,
                                     blocking_lock_timeout=600) as client:
        self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
    except Exception as e:  # pylint: disable=broad-except
      self.fails.append(e)
  # Phase 1: uncontended serial lock/unlock cycles.
  start_time = time.time()
  for _ in xrange(self.n):
    Thread()
  end_time = time.time()
  self.AddResult("OpenWithLock",
                 (end_time - start_time) / self.n, self.n)
  self.assertEqual(len(self.fails), 0)
  # Phase 2: contended cycles -- all workers lock the same object.
  start_time = time.time()
  for _ in xrange(self.n):
    self.tp.AddTask(Thread, ())
  self.tp.Join()
  end_time = time.time()
  self.AddResult("Multithreaded: OpenWithLock",
                 (end_time - start_time) / self.n, self.n)
  self.assertEqual(len(self.fails), 0)
|
settings_20210906114121.py | """
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# NOTE(review): scheduling from settings.py runs on *every* import of the
# settings module (management commands, each autoreload worker, every WSGI
# worker), so the job can fire multiple times.  Consider moving this into an
# AppConfig.ready() hook or a dedicated management command.
# Schedule the birthday-mail task daily at 11:42 server-local time.
schedule.every().day.at("11:42").do(decrease_day_count_and_send_bday_mails)


def func():
    """Poll the scheduler once per second, forever (runs on a daemon thread)."""
    while True:
        schedule.run_pending()
        time.sleep(1)


# Daemon thread: a non-daemon thread stuck in time.sleep() would keep the
# process alive and block clean shutdown of runserver / WSGI workers.
t1 = threading.Thread(target=func, daemon=True)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Project-level template directory, added to TEMPLATES['DIRS'] below.
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read from the .env file loaded above; None if the variable is missing,
# which makes Django raise ImproperlyConfigured at startup.
SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded True -- must be False when deployed.
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS only works with DEBUG=True; add the
# production hostnames before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'First_Wish_Main_App',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [templates_path],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Collection target for `collectstatic` in production.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
|
ring_bot.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# bot
# created 01.10.2021
# Thomas Kaulke, kaulketh@gmail.com
# https://github.com/kaulketh
# -----------------------------------------------------------
import os
import signal
import time
from multiprocessing import Process
import telepot
from telepot.loop import MessageLoop
from bot import singleton
from config import RING_BOT_TOKEN, RING_BOT_NAME, RING_RING_GROUP, \
THK # no public deployment (secret.py)
from config import switch_state, DING_DONG, WELCOME, RUNNING, STOPPED, \
UNKNOWN_CMD, UNKNOWN_TYPE, CMD_START, CMD_STOP, CMD_REBOOT, REBOOT, \
START, STARTED, STOPPING
from logger import LOGGER
class RingBot(singleton.Singleton):
    """ Bot class using telepot framework
        (https://telepot.readthedocs.io),
        Python >= 3
    """

    def __init__(self, token, admin):
        self.__log = LOGGER
        self.__log.debug(f"Initialize instance of {self.__class__.__name__}")
        self.__token = token
        # Chat id of the only user allowed to issue commands.
        self.__admin = admin
        self.__bot = telepot.Bot(self.__token)
        self.__ding_dong = DING_DONG.format(RING_BOT_NAME)
        # Group chat that receives doorbell notifications.
        self.__receiver = RING_RING_GROUP
        # Background process polling the bell switch; None while stopped.
        self.__checker = None

    def __check_bell(self, timeout=.25):
        """Poll the bell switch every `timeout` seconds; notify on press.

        NOTE(review): switch_state() is evaluated twice per hit (check and
        log), and a bell held longer than `timeout` posts repeatedly --
        confirm debouncing happens inside switch_state().
        """
        while True:
            if switch_state():
                self.__log.info(switch_state())
                self.__send(self.__receiver, self.__ding_dong)
            time.sleep(timeout)

    def __send(self, chat_id, text):
        """Send `text` to `chat_id`, logging the outgoing message."""
        self.__log.debug(
            f"Message posted: "
            f"{chat_id}|{text}".replace("\n", " "))
        self.__bot.sendMessage(chat_id, text)

    def __handle(self, msg):
        """Telepot callback for incoming chat messages (admin-only)."""
        content_type, chat_type, chat_id = telepot.glance(msg)
        self.__log.debug(msg)
        # check user: silently drop messages from anyone but the admin.
        if chat_id != self.__admin:
            # TODO: wrong id
            pass
            return
        # check content
        if content_type == 'text':
            command = msg['text']
            self.__log.info(f"Got command '{command}'")
            # commands
            # start: spawn the bell-polling process if not already running.
            if command == CMD_START:
                if self.__checker is None:
                    self.__checker = Process(target=self.__check_bell)
                    self.__checker.start()
                    self.__send(self.__admin, STARTED)
                self.__send(self.__admin, RUNNING)
            # stop: terminate the polling process if one exists.
            elif command == CMD_STOP:
                if isinstance(self.__checker, Process):
                    self.__checker.terminate()
                    self.__checker = None
                    self.__send(self.__admin, STOPPING)
                self.__send(self.__admin, STOPPED)
            elif command == CMD_REBOOT:
                # Announce, then reboot the host (requires passwordless sudo).
                self.__send(self.__admin, REBOOT.format(RING_BOT_NAME))
                os.system("sudo reboot")
            else:
                self.__send(self.__admin, UNKNOWN_CMD)
        else:
            self.__send(self.__admin, UNKNOWN_TYPE)

    def start(self):
        """Run the message loop and block forever (Ctrl-C to quit)."""
        try:
            MessageLoop(self.__bot,
                        {'chat': self.__handle}).run_as_thread()
            self.__log.info(START)
            self.__send(self.__admin, WELCOME.format(RING_BOT_NAME))
            # Sleep until a signal arrives; the message loop runs in its
            # own thread.
            while True:
                try:
                    signal.pause()
                except KeyboardInterrupt:
                    self.__log.warning('Program interrupted')
                    exit()
        except Exception as e:
            self.__log.error(f"An error occurred: {e}")
            exit()
def run():
    """Create the singleton RingBot and enter its blocking message loop."""
    bot = RingBot(RING_BOT_TOKEN, THK)
    bot.start()
if __name__ == '__main__':
    # Intentionally a no-op: the bot is started via run() from the package
    # entry point, not by executing this module directly.
    pass
|
VideoStream.py |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
from threading import Thread
import sys
import cv2
from queue import Queue
# This class reads all the video frames in a separate thread and always has the
# keeps only the latest frame in its queue to be grabbed by another thread
class VideoStream(object):
    """Reads video frames on a background thread, keeping only the latest.

    A bounded queue decouples the capture thread from the consumer; after
    each put the queue is drained down to one entry so read() returns the
    most recent frame.
    """

    def __init__(self, path, queueSize=15):
        # Source path or URL handed straight to OpenCV.
        self.path = path
        print("Reading frames from path:: " + path)
        self.stream = cv2.VideoCapture(path)
        # Flag checked by the capture loop; set via stop().
        self.stopped = False
        # Frame buffer shared between the capture thread and read().
        self.Q = Queue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True  # never block interpreter exit
        t.start()
        return self

    def update(self):
        """Capture loop executed on the background thread."""
        try:
            while True:
                if self.stopped:
                    print("Exiting..from stopped")
                    return
                if not self.Q.full():
                    (grabbed, frame) = self.stream.read()
                    # if the `grabbed` boolean is `False`, then we have
                    # no more frames (end of file or capture failure).
                    if "sample_video" in self.path :
                        if not grabbed:
                            print("Video path not able to grab frames returning...")
                            self.stop()
                            return
                    elif not grabbed:
                        #Uncomment to stop on end
                        print("not able to grab frames returning...")
                        self.stop()
                        return
                    self.Q.put(frame)
                    # Clean the queue to keep only the latest frame.
                    # NOTE(review): this drain races with a concurrent
                    # read(); presumably acceptable for a latest-frame-wins
                    # consumer -- verify.
                    while self.Q.qsize() > 1:
                        self.Q.get()
        except Exception as e:
            print("got error: "+str(e))

    def read(self):
        # Blocks until a frame is available.
        return self.Q.get()

    def more(self):
        # NOTE(review): False only means "empty right now"; the capture
        # thread may still enqueue further frames.
        return self.Q.qsize() > 0

    def stop(self):
        # Ask the capture thread to exit on its next loop iteration.
        self.stopped = True

    def __exit__(self, exception_type, exception_value, traceback):
        # NOTE(review): no matching __enter__ exists, so this only runs when
        # called explicitly; it releases the OpenCV capture handle.
        self.stream.release()
|
array_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Utilities for manipulating, storing, and loading experiment data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
import io
import threading
from absl import logging
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v2 as tf
# Alias used for all file I/O in this module.
gfile = tf.io.gfile
# Shared gzip options for TFRecord readers/writers below.
_TFR_OPTIONS = tf.io.TFRecordOptions('GZIP')
# Sentinel consumed by slice_structure(): SLICE.ALL means "every element".
SLICE = enum.Enum('SliceKey', ['ALL'])
def write_npz(output_dir, basename, stats_dict):
  """Write a dictionary of numpy arrays as an npz file.

  Args:
    output_dir: Directory for the output file.
    basename: Basename for output file path.
    stats_dict: Dictionary of strings to np.ndarrays; list values are
      stacked into a single array first.
  """
  arrays = {}
  for key, arr in six.iteritems(stats_dict):
    arrays[key] = np.stack(arr) if isinstance(arr, list) else arr
  # Serialize in memory, then copy to the (possibly remote) target path.
  buf = io.BytesIO()
  np.savez_compressed(buf, **arrays)
  path = '%s/%s' % (output_dir, basename)
  logging.info('Recording stats to %s', path)
  with gfile.GFile(path, 'wb') as file_handle:
    file_handle.write(buf.getvalue())
def _dict_as_namedtuple(d):
return collections.namedtuple('tup', list(d.keys()))(**d)
def load_npz(path, as_namedtuple=False):
  """Load dictionary of arrays from an npz file.

  Args:
    path: File path to npz file.
    as_namedtuple: If true, return the dictionary as a namedtuple.

  Returns:
    Dictionary (or namedtuple) of npz file contents.
  """
  with gfile.GFile(path) as fl:
    raw = fl.read()
  contents = dict(np.load(io.BytesIO(raw)))
  if as_namedtuple:
    return _dict_as_namedtuple(contents)
  return contents
def stats_dict_to_tfexample(stats):
  """Converts a dictionary of numpy arrays to a tf.Example proto.

  Float arrays are stored as float_list features and integer arrays as
  int64_list features; each array's shape is recorded under the reserved
  key '<key>/shape' so tfexample_to_stats_dict() can restore it.

  Args:
    stats: Dictionary of strings to array-likes.

  Returns:
    A tf.train.Example holding the flattened arrays and their shapes.

  Raises:
    ValueError: If a key ends with the reserved '/shape' suffix.
    NotImplementedError: If an array dtype is not float32/64 or int32/64.
  """
  example = tf.train.Example()
  fm = example.features.feature
  for key, arr in six.iteritems(stats):
    arr = np.array(arr)
    if key.endswith('/shape'):
      raise ValueError('Invalid key: %s' % key)
    if arr.dtype in (np.float32, np.float64):
      fm[key].float_list.value.extend(arr.reshape([-1]))
      fm[key + '/shape'].int64_list.value.extend(arr.shape)
    elif arr.dtype in (np.int32, np.int64):
      fm[key].int64_list.value.extend(arr.reshape([-1]))
      fm[key + '/shape'].int64_list.value.extend(arr.shape)
    else:
      # Report the dtype: the previous message formatted type(arr), which is
      # always np.ndarray here and therefore useless for debugging.
      raise NotImplementedError('Unsupported array dtype %s for key=%s'
                                % (arr.dtype, key))
  return example
def tfexample_to_stats_dict(example):
  """Converts a tf.Example proto into a dictionary of numpy arrays."""
  result = {}
  fm = example.features.feature
  for key, value in six.iteritems(fm):
    # Shape entries are consumed alongside their data key below.
    if key.endswith('/shape'):
      continue
    flat = (value.int64_list.value or
            value.float_list.value or
            value.bytes_list.value)
    dims = fm[key + '/shape'].int64_list.value
    result[key] = np.array(flat).reshape(dims)
  return result
def load_stats_from_tfrecords(path, max_records=None, as_namedtuple=False,
                              gzip=False):
  """Loads data from a TFRecord table into a dictionary of np arrays.

  Args:
    path: Path to TFRecord file.
    max_records: Maximum number of records to read.
    as_namedtuple: If true, return the stats-dictionary as a namedtuple.
    gzip: Whether to use gzip compression.

  Returns:
    Dictionary (or namedtuple) of numpy arrays; each value gains a leading
    axis that indexes records.
  """
  out = collections.defaultdict(list)
  # Eager and graph modes require different record iterators and parsers.
  if tf.executing_eagerly():
    itr = tf.data.TFRecordDataset(
        path, compression_type='GZIP' if gzip else None)
    parse_record = lambda x: tf.train.Example.FromString(x.numpy())
  else:
    tfr_options = _TFR_OPTIONS if gzip else None
    itr = tf.compat.v1.python_io.tf_record_iterator(path, tfr_options)
    parse_record = tf.train.Example.FromString
  for i, rec in enumerate(itr):
    if max_records and i >= max_records:
      break
    example = parse_record(rec)
    stats = tfexample_to_stats_dict(example)
    for key, array in six.iteritems(stats):
      out[key].append(array)
  # Stack the per-record arrays into one array per key.
  out = {k: np.stack(arr) for k, arr in six.iteritems(out)}
  return _dict_as_namedtuple(out) if as_namedtuple else out
class StatsWriter(object):
  """Simple wrapper class to record stats-dictionaries in TFRecord tables.

  Usable as a context manager; otherwise close() should be called
  explicitly.  The destructor remains as a best-effort fallback, but
  __del__ ordering at interpreter shutdown is not guaranteed, so relying
  on it alone risks losing buffered records.
  """

  def __init__(self, path, gzip=False):
    self._writer = tf.io.TFRecordWriter(path, _TFR_OPTIONS if gzip else None)

  def write(self, stats):
    """Serializes one stats-dict as a tf.Example and appends it."""
    tfexample = stats_dict_to_tfexample(stats)
    self._writer.write(tfexample.SerializeToString())

  def write_batch(self, stats_batch):
    """Writes a batch: a dict of equal-length sequences, one record per index.

    Raises:
      ValueError: If the values of stats_batch have differing lengths
        (the single-element set unpacking below fails in that case).
    """
    batch_size, = set(len(x) for x in six.itervalues(stats_batch))
    for i in range(batch_size):
      stats_i = {k: v[i] for k, v in six.iteritems(stats_batch)}
      tfexample = stats_dict_to_tfexample(stats_i)
      self._writer.write(tfexample.SerializeToString())

  def close(self):
    """Flushes and closes the underlying writer; safe to call repeatedly."""
    writer = getattr(self, '_writer', None)
    if writer is not None:
      writer.flush()
      writer.close()
      self._writer = None

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    self.close()

  def __del__(self):
    # May run after a failed __init__ or during interpreter teardown;
    # never raise from a destructor.
    try:
      self.close()
    except Exception:  # pylint: disable=broad-except
      pass
def slice_structure(struct, keys):
  """Generalized (but limited) slice function on nested structures.

  Offers numpy-style slicing on nested maps, lists, tuples, and arrays,
  supporting select-all (SLICE.ALL) and index-list slicing, e.g.
  x[3, :, 1] or x[3, indices, 1].

  For example,
    x = {'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}
    slice_structure(x, [SLICE.ALL, 0])        -> {'a': 1, 'b': 3, 'c': 5}
    slice_structure(x, [['a', 'c', 'b'], 0])  -> [1, 5, 3]

  Args:
    struct: Nested structure of dictionaries, lists, tuples, numpy arrays.
    keys: List of keys to apply at each successive depth;
      SLICE.ALL gathers all items.

  Returns:
    Nested structure with specified slices applied.
    Note: Structure elements are not necessarily copied in the process.
  """
  if not keys:
    return struct
  head, rest = keys[0], keys[1:]
  if head is SLICE.ALL:
    if isinstance(struct, dict):
      return {key: slice_structure(value, rest)
              for key, value in struct.items()}
    if isinstance(struct, (list, tuple)):
      return type(struct)(slice_structure(item, rest) for item in struct)
    raise NotImplementedError('Unsupported type for ALL: %s.' % type(struct))
  if isinstance(head, list):
    # List-of-indices slicing.
    return [slice_structure(struct[index], rest) for index in head]
  # Simple get-element-at-index case.
  return slice_structure(struct[head], rest)
class _MapResult(object):
"""Simple temporary container for threaded_map_structure() results.
Note: We can't use a simple Python list (or other builtin mutable container)
for this since tf.nest.map_structure will traverse the list and operate on
its elements.
Attributes:
result: Equals None before calling the map-function;
assigned to the function output afterwards.
"""
def __init__(self):
self.result = None
def assign(self, x):
"""Assigns a value to a container attribute for later retrieval."""
self.result = x
def threaded_map_structure(fn, *args):
  """Executes tf.nest.map_structure with parallel threads for each map call.

  Primarily useful for slow, non-compute functions (e.g. loading remote
  data).  See tf.nest.map_structure for details.

  Args:
    fn: Function to map across leaf nodes in args structure.
    *args: Nested structures of arguments to map over.

  Returns:
    Parallel structure to the one in args with map results.
  """
  def run_into(holder, *leaf_args):
    # Capture fn's output in the per-leaf container.
    holder.assign(fn(*leaf_args))

  def make_worker(holder, *leaf_args):
    return threading.Thread(target=run_into, args=(holder,) + leaf_args)

  holders = tf.nest.map_structure(lambda *_: _MapResult(), *args)
  workers = tf.nest.map_structure(make_worker, holders, *args)
  tf.nest.map_structure(lambda w: w.start(), workers)
  tf.nest.map_structure(lambda w: w.join(), workers)
  return tf.nest.map_structure(lambda h: h.result, holders)
|
timing_utils.py | # -*- coding: utf-8 -*-
"""
Created on Sun May 24 22:08:53 2015
This file provides tools for better locking of timing in python programs
@author: Ian Spielman
"""
import time
from Queue import Queue
from threading import Thread
#
# Thread programs that run independentally and monitor timing
#
def SleepUntil(delay, timing_queue):
    """Worker: block for `delay` seconds, then signal completion.

    Puts the sentinel string "done" on `timing_queue`, which timer.check()
    and timer.wait() use to detect expiry.
    """
    time.sleep(delay)
    timing_queue.put("done")
def Countdown(delay, countdown_queue, interval, countdown_mode):
    """Post a stream of progress values to countdown_queue.

    Sleeps in `interval`-sized steps for a total of `delay` seconds,
    putting one value on the queue after each step -- e.g. for driving a
    progress bar.

    Args:
        delay: total countdown time in seconds.
        countdown_queue: queue receiving one update per interval.
        interval: seconds between updates (clamped to at most `delay`).
        countdown_mode: 'precent_remaining' posts percent remaining,
            'precent_done' posts percent completed, anything else posts
            seconds remaining.  (Historical spelling kept -- callers
            compare against these exact strings.)
    """
    if delay <= 0:
        # Nothing to count down; the original divided by zero here
        # (num == 0 made true_interval = delay / 0).
        return
    if interval > delay:
        interval = delay
    num = int(delay / interval)
    true_interval = delay / num
    initial_time = time.time()
    final_time = initial_time + delay
    for i in range(num):
        # Sleep to the next tick boundary, compensating for drift.
        sleep_time = max(
            (i + 1) * true_interval - (time.time() - initial_time),
            0)
        time.sleep(sleep_time)
        remaining_time = max(final_time - time.time(), 0)
        if countdown_mode == 'precent_remaining':
            remaining_time = 100 * remaining_time / delay
        elif countdown_mode == 'precent_done':
            remaining_time = 100 * (1 - remaining_time / delay)
        countdown_queue.put(remaining_time)
#
# Main timing class
#
class timer():
    """Block program execution until a time limit has expired.

    Provides the ability to send ticks via a queue to other programs.
    Unlike sleep, you can start a timer, perform some computations and
    then wait until the time has elapsed.
    """

    def __init__(self, delay=0):
        """Setup timer with a default delay in seconds."""
        # status monitor if we are timing
        self._timing = False
        # expected delay time
        self._delay = delay
        # Start time
        self._start_time = time.time()
        # queue to contacting timing thread
        self._timing_queue = Queue()

    def _timer_done(self):
        """Mark the timer as no longer running."""
        self._timing = False

    def start(self, delay=None, countdown_queue=None, interval=1.0,
              countdown_mode=None):
        """Start a timer that will expire after delay seconds.

        If countdown_queue is passed we will start a second thread that
        puts the remaining time into that queue, updated every interval.

        If countdown_mode == 'precent_remaining' the countdown monitor
        reports percent remaining; if countdown_mode == 'precent_done'
        it reports percent done; otherwise seconds remaining.

        Raises:
            RuntimeError: if the timer is already running.
        """
        if delay is not None:
            self._delay = delay
        if self._timing == True:
            raise RuntimeError('already timing')
        # Do not start a timer for zero or negative times.
        # BUG FIX: compare self._delay, not the (possibly None) `delay`
        # argument -- `None <= 0` is a TypeError on Python 3 and was
        # always True on Python 2, so start() with the default argument
        # never actually started the timer.
        if self._delay <= 0:
            # just cleanup
            self._timer_done()
            return
        self._timing = True
        # Drain stale "done" messages left over from a previous run.
        while self._timing_queue.qsize() > 0:
            self._timing_queue.get()
        SleepWorker = Thread(target=SleepUntil,
                             args=(self._delay, self._timing_queue))
        SleepWorker.setDaemon(True)
        if countdown_queue is not None:
            CountdownWorker = Thread(
                target=Countdown,
                args=(self._delay, countdown_queue, interval, countdown_mode))
            CountdownWorker.setDaemon(True)
            CountdownWorker.start()
        SleepWorker.start()
        self._start_time = time.time()

    def elapsed(self):
        """Time since the timer started (full delay once expired)."""
        if self.check():
            return time.time() - self._start_time
        else:
            return self._delay

    def remaining(self):
        """Time remaining on the timer (0 once expired)."""
        return self._delay - self.elapsed()

    def check(self):
        """Return True while the timer is still running."""
        # if we are not timing right now, just return
        if not self._timing:
            return False
        try:
            response = self._timing_queue.get_nowait()
        except Exception:  # queue empty -- still timing
            return True
        # did get a response
        self._timer_done()
        if response == 'done':
            return False
        raise RuntimeError('invalid response from timer worker')

    def wait(self):
        """Block until the timer expires.

        Returns the amount of time waited in this function.
        """
        # if we are not timing right now, just return
        if not self._timing:
            return 0.0
        start_time = time.time()
        response = self._timing_queue.get()
        self._timer_done()
        if response == 'done':
            return time.time() - start_time
        raise RuntimeError('invalid response from timer worker')
|
ChannelPointsSFXTrigger_StreamlabsSystem.py | # -*- coding: utf-8 -*-
#---------------------------
# Import Libraries
#---------------------------
import clr, codecs, json, os, re, sys, threading, datetime, System
# Include the assembly with the name AnkhBotR2
clr.AddReference([asbly for asbly in System.AppDomain.CurrentDomain.GetAssemblies() if "AnkhBotR2" in str(asbly)][0])
import AnkhBotR2
clr.AddReference("IronPython.Modules.dll")
# Twitch PubSub library and dependencies
lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Lib")
clr.AddReferenceToFileAndPath(os.path.join(lib_path, "Microsoft.Extensions.Logging.Abstractions.dll"))
clr.AddReferenceToFileAndPath(os.path.join(lib_path, "TwitchLib.Communication.dll"))
clr.AddReferenceToFileAndPath(os.path.join(lib_path, "TwitchLib.PubSub.dll"))
# from TwitchLib.PubSub import *
# from TwitchLib.PubSub.Events import *
# sys.path.append(os.path.dirname(os.path.realpath(__file__)) + r"\References")
# clr.AddReference(r"TwitchLib.PubSub.dll")
from TwitchLib.PubSub import TwitchPubSub
#---------------------------
# [Required] Script Information
#---------------------------
ScriptName = "Twitch Channel Points Alert Trigger"
Website = "https://www.twitch.tv/EncryptedThoughts"
Description = "Script to trigger Overlay Alert and/or SFX on channel point reward redemptions."
Creator = "WillDBot and EncryptedThoughts"
Version = "1.0.0-2.0.0.0"
#---------------------------
# Define Global Variables
#---------------------------
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
SoundsDirectory = os.path.join(os.path.dirname(__file__), "Sounds")
ReadMe = os.path.join(os.path.dirname(__file__), "README.md")
EventReceiver = None
ThreadQueue = []
CurrentThread = None
PlayNextAt = datetime.datetime.now()
Sounds = {}
RewardCount = 15
WSEventName = ""
#---------------------------------------
# Classes
#---------------------------------------
class Settings(object):
    """Script settings persisted as JSON (IronPython 2.7 host)."""

    def __init__(self, SettingsFile=None):
        # Load persisted settings if present; otherwise seed defaults for
        # the "Test" slot and RewardCount numbered reward slots.
        if SettingsFile and os.path.isfile(SettingsFile):
            with codecs.open(SettingsFile, encoding="utf-8-sig", mode="r") as f:
                self.__dict__ = json.load(f, encoding="utf-8")
        else:
            self.EnableDebug = False
            self.WSEventName = "RedeemTwitchPointAlerts"
            Reward("Test").build(None).assign(self)
            for i in range(1, RewardCount + 1):
                Reward(i).build(None).assign(self)

    def Reload(self, jsondata):
        # NOTE(review): sort_keys/indent are json.dumps() options, not
        # json.loads() options -- this raises TypeError if ever called;
        # ReloadSettings() below bypasses this method.  Confirm and fix.
        self.__dict__ = json.loads(jsondata, encoding="utf-8", sort_keys=True, indent=2)
        return

    def Save(self, SettingsFile):
        """Write settings to JSON plus a .js companion for the overlay UI."""
        try:
            with codecs.open(SettingsFile, encoding="utf-8-sig", mode="w+") as f:
                json.dump(self.__dict__, f, encoding="utf-8")
            with codecs.open(SettingsFile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
                f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8', sort_keys=True, indent=2)))
        except:
            DebugLog(ScriptName, "Failed to save settings to file.")
        return
class Reward(object):
    """Settings-backed view of a single channel-point reward slot.

    Field values live on the settings object under flattened attribute
    names like 'Reward1Name'; build() pulls them onto this instance and
    assign() pushes them onto another object under the same names.
    """

    # (field suffix, default) pairs shared by build() and assign().
    _FIELDS = [
        ("Name", ""),
        ("ActivationType", "Immediate"),
        ("ImageFile", ""),
        ("Font", "Bold 70px Bangers"),
        ("Duration", 5),
        ("AlignHorizontal", 5),
        ("AlignVertical", 5),
        ("Color", 5),
        ("ExpandDirection", 5),
        ("TransitionType", "Scale"),
        ("SFXFile", ""),
        ("Volume", 100),
    ]

    def __init__(self, index):
        self.index = index
        self.id = "Reward" + str(index)

    def build(self, settings):
        """Populate fields from a settings object, falling back to defaults."""
        for suffix, default in self._FIELDS:
            setattr(self, suffix, getattr(settings, self.id + suffix, default))
        return self

    def assign(self, obj):
        """Copy this reward's fields onto obj as '<id><Field>' attributes."""
        for suffix, _ in self._FIELDS:
            setattr(obj, self.id + suffix, getattr(self, suffix))
#---------------------------
# [Required] Initialize Data (Only called on load)
#---------------------------
def Init():
    """Chatbot entry hook: load settings and index the sound files."""
    global ScriptSettings
    global WSEventName
    ScriptSettings = Settings(SettingsFile)
    # Re-save immediately so newly added default fields are persisted.
    ScriptSettings.Save(SettingsFile)
    WSEventName = ScriptSettings.WSEventName
    BuildSoundPathDict()
    return
#---------------------------
# [Required] Execute Data / Process messages
#---------------------------
def Execute(data):
    # Chat messages are not used by this script; rewards arrive via PubSub.
    return
#---------------------------
# [Required] Tick method (Gets called during every iteration even when there is no incoming data)
#---------------------------
def Tick():
    """Chatbot tick hook: run one queued alert worker at a time.

    PlayNextAt throttles starts so alerts do not overlap; a new worker is
    started only after the previous thread has finished.
    """
    global PlayNextAt
    if PlayNextAt > datetime.datetime.now():
        return
    global CurrentThread
    # Reap a finished worker before starting the next one.
    if CurrentThread and CurrentThread.isAlive() == False:
        CurrentThread = None
    if CurrentThread == None and len(ThreadQueue) > 0:
        DebugLog(ScriptName, "Starting new thread. " + str(PlayNextAt))
        CurrentThread = ThreadQueue.pop(0)
        CurrentThread.start()
    return
#---------------------------
# [Optional] Parse method (Allows you to create your own custom $parameters)
#---------------------------
def Parse(parseString, userid, username, targetid, targetname, message):
    # No custom $parameters are provided; return the string unchanged.
    return parseString
#---------------------------
# [Optional] Reload Settings (Called when a user clicks the Save Settings button in the Chatbot UI)
#---------------------------
def ReloadSettings(jsonData):
    """Chatbot hook: apply settings JSON from the UI and persist it."""
    DebugLog(ScriptName, "Saving settings.")
    try:
        # Replace the whole settings dict with the UI-provided values.
        ScriptSettings.__dict__ = json.loads(jsonData)
        ScriptSettings.Save(SettingsFile)
        DebugLog(ScriptName, "Settings saved successfully")
    except Exception as e:
        DebugLog(ScriptName, str(e))
    return
#---------------------------
# [Optional] Unload (Called when a user reloads their scripts or closes the bot / cleanup stuff)
#---------------------------
def Unload():
    # Chatbot hook: disconnect PubSub when scripts reload or the bot closes.
    StopEventReceiver()
    return
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
def ScriptToggled(state):
    """Chatbot hook: connect/disconnect PubSub as the script is toggled."""
    if state:
        # Only (re)connect if no receiver is currently active.
        if EventReceiver is None:
            RestartEventReceiver()
    else:
        StopEventReceiver()
    return
#---------------------------
# StartEventReceiver (Start twitch pubsub event receiver)
#---------------------------
def StartEventReceiver():
    """Create the TwitchLib PubSub client, wire callbacks, and connect."""
    DebugLog(ScriptName, "Starting receiver")
    global EventReceiver
    EventReceiver = TwitchPubSub()
    # Topics are subscribed in the connected callback, redemptions handled
    # by EventReceiverRewardRedeemed.
    EventReceiver.OnPubSubServiceConnected += EventReceiverConnected
    EventReceiver.OnRewardRedeemed += EventReceiverRewardRedeemed
    try:
        EventReceiver.Connect()
    except Exception as e:
        DebugLog(ScriptName, "Unable to start event receiver. Exception: " + str(e))
#---------------------------
# StopEventReceiver (Stop twitch pubsub event receiver)
#---------------------------
def StopEventReceiver():
    """Disconnect and discard the PubSub client; safe to call when absent."""
    global EventReceiver
    try:
        if EventReceiver is None:
            return
        EventReceiver.Disconnect()
        DebugLog(ScriptName, "Event receiver disconnected")
        EventReceiver = None
    except Exception as e:
        # NOTE(review): a failed Disconnect leaves EventReceiver set, so
        # ScriptToggled will not reconnect -- confirm intended.
        DebugLog(ScriptName, "Event receiver already disconnected. Exception: " + str(e))
#---------------------------
# RestartEventReceiver (Restart event receiver cleanly)
#---------------------------
def RestartEventReceiver():
    # Restart cleanly: tear down any existing connection first.
    StopEventReceiver()
    StartEventReceiver()
#---------------------------
# EventReceiverConnected (Twitch pubsub event callback for on connected event. Needs a valid UserID and AccessToken to function properly.)
#---------------------------
def EventReceiverConnected(sender, e):
    """PubSub on-connected callback: resolve the channel id and subscribe to reward topics."""
    # Streamer OAuth token from the bot; strip the "oauth:" prefix used for IRC.
    oauth = AnkhBotR2.Managers.GlobalManager.Instance.VMLocator.StreamerLogin.Token.replace("oauth:", "")
    headers = { "Authorization": 'OAuth ' + oauth }
    # Parent.GetRequest wraps the HTTP reply in a JSON envelope whose "response"
    # key holds the body as a JSON string — hence the double json.loads.
    data = json.loads(Parent.GetRequest("https://id.twitch.tv/oauth2/validate", headers))
    userid = json.loads(data["response"])['user_id']
    DebugLog(ScriptName, "Event receiver connected, sending topics for channel id: " + str(userid))
    EventReceiver.ListenToRewards(userid)
    EventReceiver.SendTopics(oauth)
    return
#---------------------------
# EventReceiverRewardRedeemed (Twitch pubsub event callback for a detected redeemed channel point reward.)
#---------------------------
def EventReceiverRewardRedeemed(sender, e):
    """PubSub redemption callback: queue a worker thread for each configured reward slot matching the redeemed title."""
    DebugLog(ScriptName, "Event triggered: " + str(e.TimeStamp) + " ChannelId: " + str(e.ChannelId) + " Login: " + str(e.Login) + " DisplayName: " + str(e.DisplayName) + " Message: " + str(e.Message) + " RewardId: " + str(e.RewardId) + " RewardTitle: " + str(e.RewardTitle) + " RewardPrompt: " + str(e.RewardPrompt) + " RewardCost: " + str(e.RewardCost) + " Status: " + str(e.Status))
    # Check every configured reward slot (Reward1..RewardN) against the title.
    for i in range(1, RewardCount + 1):
        rewardId = "Reward" + str(i)
        rewardName = getattr(ScriptSettings, rewardId + "Name")
        # isspace() guard skips slots whose configured name is only whitespace.
        if e.RewardTitle == rewardName and not rewardName.isspace():
            rewardType = getattr(ScriptSettings, rewardId + "ActivationType")
            # Fire immediately, on queue accept/reject, or on both, per slot config.
            if (
                (rewardType == "Both")
                or (rewardType == "Immediate" and "FULFILLED" in e.Status)
                or (rewardType == r"On Reward Queue Accept/Reject" and "ACTION_TAKEN" in e.Status)
            ):
                # Threads are queued, not started, here; presumably drained by a
                # Tick/Execute loop elsewhere in the script — not visible in this chunk.
                ThreadQueue.append(threading.Thread(target=RewardRedeemedWorker,args=(e, str(i))))
    return
#---------------------------
# RewardRedeemedWorker (Worker function to be spun off into its own thread to complete without blocking the rest of script execution.)
#---------------------------
def RewardRedeemedWorker(event, rewardIndex):
    """Thread worker: fire the overlay image alert and sound for reward slot `rewardIndex`."""
    DebugLog(ScriptName, "Redeeming reward #" + rewardIndex)
    # Build the reward payload for this slot from the current settings.
    reward = Reward(rewardIndex).build(ScriptSettings)
    DebugLog(ScriptName, json.dumps(reward.__dict__))
    ImageWorker(event, reward)
    SoundWorker(reward)
    global PlayNextAt
    # NOTE(review): `delay` is not defined in this chunk — presumably a
    # module-level cooldown in seconds between alerts; confirm it is set
    # before the first redemption fires.
    PlayNextAt = datetime.datetime.now() + datetime.timedelta(0, delay)
#---------------------------
# ImageWorker (Worker function to perform the alert functionality for the given reward.)
#---------------------------
def ImageWorker(event, reward):
    """Attach the redemption message to the reward and push it to the overlay websocket."""
    DebugLog(ScriptName, reward.ImageFile + " " + str(reward.TransitionType) + " " + str(reward.Duration))
    message = event.DisplayName + " redeemed " + event.RewardTitle
    if event.Message:
        message = message + "\n" + event.Message
    reward.Message = message
    Parent.BroadcastWsEvent(WSEventName, json.dumps(reward.__dict__))
#---------------------------
# SoundWorker (Worker function to check for the existence of the sound files then plays them)
#---------------------------
def SoundWorker(reward):
    """Look up the reward's sound file in the Sounds index and play it.

    A missing file is logged and skipped; the visual alert still fires.
    """
    global Sounds
    soundFilePath = None
    try:
        # NOTE(review): Sounds.Keys (capital K) suggests a .NET dictionary
        # under IronPython — confirm before restructuring this lookup.
        DebugLog("Checking", str(reward.SFXFile) + " is in " + str(Sounds.Keys))
        soundFilePath = Sounds[reward.SFXFile]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any lookup failure is treated as "sound missing".
        DebugLog("MissingSound", reward.SFXFile)
    if soundFilePath is not None:
        DebugLog(ScriptName, "PlayingSound " + reward.SFXFile + " at vol: " + str(reward.Volume))
        DebugLog(ScriptName, soundFilePath)
        # Parent.PlaySound takes the volume as a 0.0-1.0 fraction.
        Parent.PlaySound(soundFilePath, reward.Volume/100.0)
#--------------------------
# BuildSoundPathDict (Builds the list of available sounds and their full paths)
#--------------------------
def BuildSoundPathDict():
    """Index every file under SoundsDirectory (recursively) by bare filename."""
    global Sounds
    for (dirpath, dirnames, filenames) in os.walk(SoundsDirectory):
        for filename in filenames:
            # Duplicate filenames in different subdirectories overwrite
            # earlier entries — last directory walked wins.
            Sounds[filename] = os.sep.join([dirpath, filename])
    # NOTE(review): Sounds.Keys (capital K) implies a .NET Dictionary under
    # IronPython rather than a Python dict — confirm before changing.
    DebugLog("FoundSounds", Sounds.Keys)
#---------------------------
# TestWSEvent (Attached to settings button; sends a test alert over the websocket.)
#---------------------------
def TestWSEvent():
    """Send a dummy alert over the websocket so the overlay connection can be verified."""
    testReward = Reward("Test").build(ScriptSettings)
    testReward.Message = "Testing The WebSocket"
    payload = json.dumps(testReward.__dict__)
    DebugLog("Testing", payload)
    Parent.BroadcastWsEvent(WSEventName, payload)
#---------------------------
# OpenReadme (Attached to settings button to open the readme file in the script bin.)
#---------------------------
def OpenReadMe():
    """UI button hook: open the script's ReadMe with its default application (os.startfile is Windows-only)."""
    os.startfile(ReadMe)
#---------------------------
# DebugLog (Writes a message to the bot log when debug logging is enabled.)
#---------------------------
def DebugLog(name, msg):
    """Write `msg` to the bot log under `name`, but only when debug logging is enabled."""
    if not ScriptSettings.EnableDebug:
        return
    Parent.Log(name, str(msg))
|
test.py | import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
         weights=None,
         batch_size=32,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6, # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir=Path(''), # for saving images
         save_txt=False, # for auto-labelling
         save_hybrid=False, # for hybrid auto-labelling
         save_conf=False, # save auto-label confidences
         plots=True,
         log_imgs=0, # number of logged images
         compute_loss=None):
    """Evaluate a YOLOv5 detection model and report P/R/mAP@.5/mAP@.5:.95.

    Runs either standalone (loads `weights`, builds a dataloader from the
    `data` yaml) or from train.py (reuses the passed `model`/`dataloader`).
    Returns ((mp, mr, map50, map, *val_losses), per-class mAP array, timing tuple).
    """
    # Initialize/load model and set device
    training = model is not None
    if training: # called by train.py
        device = next(model.parameters()).device # get model device
    else: # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)
        # Directories
        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
        # Load model
        model = attempt_load(weights, map_location=device) # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        # model = nn.DataParallel(model)
    # Half
    half = device.type != 'cpu' # half precision only supported on CUDA
    if half:
        model.half()
    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml') # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader) # model dict
    check_dataset(data) # check
    nc = 1 if single_cls else int(data['nc']) # number of classes
    # mAP is evaluated at 10 IoU thresholds from 0.5 to 0.95.
    iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
    niou = iouv.numel()
    # Logging
    log_imgs, wandb = min(log_imgs, 100), None # ceil
    try:
        import wandb # Weights & Biases
    except ImportError:
        log_imgs = 0
    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
        _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
        path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
        dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True,
                                       prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]
    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float() # uint8 to fp16/32
        img /= 255.0 # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape # batch size, channels, height, width
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(img, augment=augment) # inference and training outputs
            t0 += time_synchronized() - t
            # Compute loss
            if compute_loss:
                loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
            t = time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
            t1 += time_synchronized() - t
        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else [] # target class
            path = Path(paths[si])
            seen += 1
            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue
            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
                    line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')
            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                             "class_id": int(cls),
                             "box_caption": "%s %.3f" % (names[cls], conf),
                             "scores": {"class_score": conf},
                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
                wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4]) # xywh
                box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})
            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = [] # target indices
                tcls_tensor = labels[:, 0]
                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
                if plots:
                    confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices (tcls_tensor holds target classes)
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices (pred[:, 5] holds predicted classes)
                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]] # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
                                if len(detected) == nl: # all targets already located in image
                                    break
            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
            Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
            Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
    else:
        nt = torch.zeros(1)
    # Print results
    pf = '%20s' + '%12.3g' * 6 # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    # Print results per class
    if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            wandb.log({"Images": wandb_images})
            wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
        anno_json = '../coco/annotations/instances_val2017.json' # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)
        try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval
            anno = COCO(anno_json) # init annotations api
            pred = anno.loadRes(pred_json) # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')
    # Return results
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    model.float() # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
# CLI entry point: parse arguments, then run a normal evaluation ('val'/'test')
# or a speed-vs-accuracy 'study' sweep over image sizes.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
    parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--project', default='runs/test', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    opt = parser.parse_args()
    # Evaluating on COCO always saves pycocotools-compatible JSON.
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.data = check_file(opt.data) # check file
    print(opt)
    check_requirements()
    if opt.task in ['val', 'test']: # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose,
             save_txt=opt.save_txt | opt.save_hybrid,
             save_hybrid=opt.save_hybrid,
             save_conf=opt.save_conf,
             )
    elif opt.task == 'study': # run over a range of settings and save/plot
        for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
            f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
            x = list(range(320, 800, 64)) # x axis
            y = [] # y axis
            for i in x: # img-size
                print('\nRunning %s point %s...' % (f, i))
                r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
                               plots=False)
                y.append(r + t) # results and times
            np.savetxt(f, y, fmt='%10.4g') # save
        os.system('zip -r study.zip study_*.txt')
        plot_study_txt(f, x) # plot
|
es_index_listener.py | """\
Example.
%(prog)s production.ini
"""
from webtest import TestApp
from snovault import STORAGE
from snovault.elasticsearch import ELASTIC_SEARCH
import atexit
import datetime
import elasticsearch.exceptions
import json
import logging
import os
import psycopg2
import select
import signal
import socket
import sqlalchemy.exc
import sys
import threading
import time
from urllib.parse import parse_qsl
log = logging.getLogger(__name__)
EPILOG = __doc__
DEFAULT_TIMEOUT = 60
PY2 = sys.version_info[0] == 2
# We need this because of MVCC visibility.
# See slide 9 at http://momjian.us/main/writings/pgsql/mvcc.pdf
# https://devcenter.heroku.com/articles/postgresql-concurrency
def run(testapp, timeout=DEFAULT_TIMEOUT, dry_run=False, path='/index', control=None, update_status=None):
    """Listen for postgres "snovault.transaction" NOTIFYs and trigger indexing.

    Loops forever: POSTs to `path` on `testapp`, reports progress through
    `update_status`, then blocks on select() until the database notifies or
    `timeout` seconds elapse. A readable `control` socket with no data means
    the other end shut down, and the loop returns.
    """
    assert update_status is not None
    timestamp = datetime.datetime.now().isoformat()
    update_status(
        status='connecting',
        timestamp=timestamp,
        timeout=timeout,
    )
    # Make sure elasticsearch is up before trying to index.
    if path == '/index_file':
        es = testapp.app.registry['snp_search']
    else:
        es = testapp.app.registry[ELASTIC_SEARCH]
    es.info()
    max_xid = 0
    DBSession = testapp.app.registry[STORAGE].write.DBSession
    engine = DBSession.bind # DBSession.bind is configured by app init
    # noqa http://docs.sqlalchemy.org/en/latest/faq.html#how-do-i-get-at-the-raw-dbapi-connection-when-using-an-engine
    connection = engine.pool.unique_connection()
    try:
        # Detach from the pool: this raw DBAPI connection is owned by the
        # listener for its whole lifetime.
        connection.detach()
        conn = connection.connection
        conn.autocommit = True
        conn.set_session(readonly=True)
        sockets = [conn]
        if control is not None:
            sockets.append(control)
        recovery = None
        listening = False
        with conn.cursor() as cursor:
            while True:
                if not listening:
                    # cannot execute LISTEN during recovery
                    cursor.execute("""SELECT pg_is_in_recovery();""")
                    recovery, = cursor.fetchone()
                    if not recovery:
                        # http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications
                        cursor.execute("""LISTEN "snovault.transaction";""")
                        log.debug("Listener connected")
                        listening = True
                cursor.execute("""SELECT txid_current_snapshot();""")
                snapshot, = cursor.fetchone()
                timestamp = datetime.datetime.now().isoformat()
                update_status(
                    listening=listening,
                    recovery=recovery,
                    snapshot=snapshot,
                    status='indexing',
                    timestamp=timestamp,
                    max_xid=max_xid,
                )
                try:
                    res = testapp.post_json(path, {
                        'record': True,
                        'dry_run': dry_run,
                        'recovery': recovery,
                    })
                except Exception as e:
                    # Indexing failure is reported but does not stop the loop.
                    timestamp = datetime.datetime.now().isoformat()
                    log.exception('index failed at max xid: %d', max_xid)
                    update_status(error={
                        'error': repr(e),
                        'max_xid': max_xid,
                        'timestamp': timestamp,
                    })
                else:
                    timestamp = datetime.datetime.now().isoformat()
                    result = res.json
                    result['stats'] = {
                        k: int(v) for k, v in parse_qsl(
                            res.headers.get('X-Stats', ''))
                    }
                    result['timestamp'] = timestamp
                    update_status(last_result=result)
                    if result.get('indexed', 0):
                        update_status(result=result)
                        log.info(result)
                update_status(
                    status='waiting',
                    timestamp=timestamp,
                    max_xid=max_xid,
                )
                # Wait on notification
                readable, writable, err = select.select(sockets, [], sockets, timeout)
                if err:
                    raise Exception('Socket error')
                if control in readable:
                    command = control.recv(1)
                    log.debug('received command: %r', command)
                    if not command:
                        # Other end shutdown
                        return
                if conn in readable:
                    # Drain all pending NOTIFYs, tracking the highest xid seen.
                    conn.poll()
                    while conn.notifies:
                        notify = conn.notifies.pop()
                        xid = int(notify.payload)
                        max_xid = max(max_xid, xid)
                        log.debug('NOTIFY %s, %s', notify.channel, notify.payload)
    finally:
        connection.close()
class ErrorHandlingThread(threading.Thread):
    """Thread whose run() retries its target forever across DB/ES outages.

    Operational errors (database restarting, elasticsearch unreachable) are
    logged, reported via update_status, and retried after `timeout` seconds;
    any other exception kills the whole process with SIGINT so mod_wsgi
    restarts it at the next request.
    """
    if PY2:
        # Python 2's Thread name-mangles these as _Thread__*; expose the
        # Python 3 style _kwargs/_args/_target names used by run() below.
        @property
        def _kwargs(self):
            return self._Thread__kwargs
        @property
        def _args(self):
            return self._Thread__args
        @property
        def _target(self):
            return self._Thread__target
    def run(self):
        timeout = self._kwargs.get('timeout', DEFAULT_TIMEOUT)
        update_status = self._kwargs['update_status']
        control = self._kwargs['control']
        while True:
            try:
                self._target(*self._args, **self._kwargs)
            except (psycopg2.OperationalError, sqlalchemy.exc.OperationalError, elasticsearch.exceptions.ConnectionError) as e:
                # Handle database restart
                log.warning('Database not there, maybe starting up: %r', e)
                timestamp = datetime.datetime.now().isoformat()
                update_status(
                    timestamp=timestamp,
                    status='sleeping',
                    error={'error': repr(e), 'timestamp': timestamp},
                )
                # Wake early if the control socket closes during the backoff.
                readable, _, _ = select.select([control], [], [], timeout)
                if control in readable:
                    command = control.recv(1)
                    log.debug('received command: %r', command)
                    if not command:
                        # Other end shutdown
                        return
                log.debug('sleeping')
                time.sleep(timeout)
                continue
            except Exception:
                # Unfortunately mod_wsgi does not restart immediately
                log.exception('Exception in listener, restarting process at next request.')
                os.kill(os.getpid(), signal.SIGINT)
                break
def composite(loader, global_conf, **settings):
    """Paste composite factory: start the indexer listener thread and return a
    small WSGI app that reports the listener's status as JSON."""
    listener = None
    # Register before testapp creation.
    @atexit.register
    def join_listener():
        if listener:
            log.debug('joining listening thread')
            listener.join()
    path = settings.get('path', '/index')
    # Composite app is used so we can load the main app
    app_name = settings.get('app', None)
    app = loader.get_app(app_name, global_conf=global_conf)
    username = settings.get('username', 'IMPORT')
    environ = {
        'HTTP_ACCEPT': 'application/json',
        'REMOTE_USER': username,
    }
    testapp = TestApp(app, environ)
    # Use sockets to integrate with select
    controller, control = socket.socketpair()
    timestamp = datetime.datetime.now().isoformat()
    status_holder = {
        'status': {
            'status': 'starting listener',
            'started': timestamp,
            'errors': [],
            'results': [],
        },
    }
    def update_status(error=None, result=None, indexed=None, **kw):
        # Setting a value in a dictionary is atomic
        status = status_holder['status'].copy()
        status.update(**kw)
        # Keep only the 3 most recent errors and 10 most recent results.
        if error is not None:
            status['errors'] = [error] + status['errors'][:2]
        if result is not None:
            status['results'] = [result] + status['results'][:9]
        status_holder['status'] = status
    kwargs = {
        'testapp': testapp,
        'control': control,
        'update_status': update_status,
        'path': path,
    }
    if 'timeout' in settings:
        kwargs['timeout'] = float(settings['timeout'])
    listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs)
    listener.daemon = True
    log.debug('starting listener')
    listener.start()
    # Registered after join_listener so atexit (LIFO) shuts the socket down
    # first, letting the listener loop exit before join_listener joins it.
    @atexit.register
    def shutdown_listener():
        log.debug('shutting down listening thread')
        control # Prevent early gc
        controller.shutdown(socket.SHUT_RDWR)
    def status_app(environ, start_response):
        status = '200 OK'
        response_headers = [('Content-type', 'application/json')]
        start_response(status, response_headers)
        # NOTE(review): returns a str body; WSGI under Python 3 expects an
        # iterable of bytes — confirm the server/deployment tolerates this.
        return [json.dumps(status_holder['status'])]
    return status_app
def internal_app(configfile, app_name=None, username=None):
    """Load the Pyramid app from `configfile` and wrap it in a TestApp acting as `username`."""
    from webtest import TestApp
    from pyramid import paster
    app = paster.get_app(configfile, app_name)
    environ = {
        'HTTP_ACCEPT': 'application/json',
        'REMOTE_USER': username or 'IMPORT',
    }
    return TestApp(app, environ)
def main():
    """Command-line entry point: parse arguments and run the indexing listener loop."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Listen for changes from postgres and index in elasticsearch",
        epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--app-name', help="Pyramid app name in configfile")
    parser.add_argument(
        '--username', '-u', default='INDEXER', help="Import username")
    parser.add_argument(
        '--dry-run', action='store_true', help="Don't post to ES, just print")
    parser.add_argument(
        '-v', '--verbose', action='store_true', help="Print debug level logging")
    parser.add_argument(
        '--poll-interval', type=int, default=DEFAULT_TIMEOUT,
        help="Poll interval between notifications")
    parser.add_argument(
        '--path', default='/index',
        help="Path of indexing view (/index or /index_file)")
    parser.add_argument('config_uri', help="path to configfile")
    args = parser.parse_args()
    logging.basicConfig()
    testapp = internal_app(args.config_uri, args.app_name, args.username)
    # Loading app will have configured from config file. Reconfigure here:
    if args.verbose or args.dry_run:
        logging.getLogger('snovault').setLevel(logging.DEBUG)
    # Fixed: run() asserts update_status is not None, so the previous call
    # without it always crashed. Standalone runs get a no-op reporter; the
    # composite app supplies a real one.
    return run(testapp, args.poll_interval, args.dry_run, args.path,
               update_status=lambda **kw: None)
# Allow running the listener directly: `python es_index_listener.py config.ini`.
if __name__ == '__main__':
    main()
|
app.py | from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS
import routes
import config
import threading
import logging
from services.kf_services import process_HTML_kf
import time
# Flask application instance for the HTML->JSON service.
html2json_app = Flask(__name__)
log = logging.getLogger('file')
# Kick off the background HTML processor before the web app starts serving.
try:
    t1 = threading.Thread(target=process_HTML_kf, name='keep_on_running')
    t1.start()
    log.info("multithread started")
except Exception as e:
    log.error('ERROR WHILE RUNNING CUSTOM THREADS %s'%e)
# Optionally allow cross-origin requests on the API routes.
if config.ENABLE_CORS:
    cors = CORS(html2json_app, resources={r"/api/*": {"origins": "*"}})
# Auto-register every Blueprint exported by the routes module.
for blueprint in vars(routes).values():
    if isinstance(blueprint, Blueprint):
        html2json_app.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)
if __name__ == "__main__":
    html2json_app.run(host=config.HOST, port=config.PORT, debug=False)
|
connection.py | # Modules names in alphabetical order
import os,re,select,socket,sys,threading,time
class ConnectionError(Exception):
    """Connection-layer error carrying a name, a numeric code, and a message."""
    def __init__(self, name, code, mesg):
        self.name = name
        # Code may arrive as a string; store it as an int.
        self.code = int(code)
        self.mesg = mesg
    def __str__(self):
        return "[Connection Error {0}] {1} : {2}".format(self.code, self.name, self.mesg)
class Connection:
"""
Written by Kaustubh Karkare.
The connection class aims to provide an extremely simple way to set up TCP/UDP Servers/Clients.
Connections are creating using the statement similar to:
link = connection({ "name":"TCP-Client", "host":"192.168.0.1", "port":12345, "type":"tcp", "role":"client", "handler":somefunction })
The "name", "host", "port", "type" and "role" key-value pairs are non-optional. Each of these, along with the additional options are described:
name : The name of the connection which used to identify it during debugging. It is recommended that you provide a unique name to each connection. In case of a server, each accepted connection is named by suffixing the remote host and port addresses to the name of the server connection.
host : The IP Address or Host Name (which should be DNS resolvable) to which a connection is to be made (in clients) or at which incoming connections/data are/is to be recieved (in servers).
port : The Port Number to be connected to (in clients) or listened at (in servers).
type : Type of connection, must be either "tcp" or "udp".
role : Role of this connection, muct be either "server" or "client".
handler : A function that will be called whenever new data is recieved, described in more detail later.
debug : A file-like object to which all debugging messages will be sent, with the connection name suffixed (default value = None).
family : Type of socket family, must be "ipv4".
maxconn : The maximum number of connections that a server should handle at a time. The server rejects any additional incoming connections (default value = 10).
poll : The amount of time between checks for new data, in seconds (default value = 1).
buffer : The size of the data that is to be read from the stream, in bytes (default value = 1024).
Handler Functions:
In case of a TCP-Server, whenever a client tries to set up a connection to it, a new TCP-Client-type connection object is created, and the handler function is passed down.
In case of a TCP-Client, whenever new data is recieved, the handler function is called with three arguments: data (which was just recieved), info and args (a dictionary that contains additional data to be passed to this function is specified during creation of the connection).
The info argument contains a dictionary containing useful information about the connection like remote host, port and pointers to functions to send data to the remote host, to terminate the client connection, and to kill the spawning server too.
Whatever this function returns will be saved in the connection as args, and provided to it during the next function call. If args is a dictionary, and contains a key "binary", whose value is True, logging of recieved TCP data is disabled, until this is changed. This is useful while transferring large amounts of binary data.
In case the data to be recieved exceeded the buffer-size, it is the responsibility of the handler function to keep records and append the different pieces together.
The handler function call in case of a TCP-Client is blocking - no new data will be read from the stream till the function returns.
There are however two exceptional cases where the above rules do not apply: When the connection is initially established, or is being terminated, this function will be called with the data argument set to None. Note however, that the termination call is unreliable.
In case of UDP-Servers, the handler function is defined the same way is that of the TCP-Client.
The return value of this function is ignored completely, as it would only create problems due to the multithreaded nature in which is is called.
However, it is non-blocking - each handler function call has its own thread and therefore, the function must be thread-safe.
In case of UDP-Clients, the handler function is not required, and may be omitted from the constructor itself.
The various functions that are available to you for use are (the self argument has been omitted for simplicity's sake in the following list):
__init__(data): Takes a dictionary type object and calls configure(data), followed by setup()
configure(data):
Verifies the values provided in the dictionary type object provided and makes the connection ready for use.
All Configuration variables MUST be set using the configure() function.
If the connection was active when you change the Configuration, the original connection will be lost, and a new one will be automatically setup. You may continue to use the same connection object.
setup():
The setup() function establishes links with the server (if you are setting up a client) or starts listening for connections on the given port.
In a server, an infinite loop (that can be terminated using the close() function) begins that checks for incoming connections and creates a new client object when they do to interact with them.
In a client, a similar infinite loop (terminated using close()) keeps checking for data that is recieved from the remote system.
This function is non-blocking as the above loops are run in a parallel thread.
info(): Returns a string describing the connection.
active(): Returns a boolean value that indicates whether or not the connection is active.
clients(): Returns a boolean value that indicates whether or not this TCP Server has had or still has active connections with clients.
send(): Sends data over the connection to the remote host, as specified in the Configuration.
wait(): Suspends execution (in the calling thread) until this connection is terminated.
close():
In case of a TCP Server, closing the connection (using close()) would result in the closing of all objects that were created in response to clients that connected to this server.
This process is blocking - execution is suspended until the termination process is complete.
Error Codes:
1 : Missing option required for Configuration.
2 : Attempt to modify read-only data during Configuration.
3 : Invalid data provided for options used in Configuration.
4 : Could not bind to given port as it is already bound to some other connection/socket.
5 : Could not bind to specified socket.
6 : Connection terminated by remote host.
"""
def spawn(self,name,function,arguments=()):
    """Run *function* in a new thread named *name* and return the Thread.

    ``arguments`` may be a tuple/list (passed positionally) or a dict
    (passed as keyword arguments); any other value means no arguments.
    The target function should handle its own exceptions, or raise none,
    to ensure proper functioning.
    """
    # Original had the second docstring line as a stray no-op string
    # statement and used type() membership tests; isinstance() also
    # accepts subclasses and reads more clearly.
    if isinstance(arguments, (tuple, list)):
        args, kwargs = tuple(arguments), {}
    elif isinstance(arguments, dict):
        args, kwargs = (), arguments
    else:
        args, kwargs = (), {}
    newthread = threading.Thread(name=name, target=function, args=args, kwargs=kwargs)
    newthread.start()
    return newthread
def __init__(self,data,link=None):
    """Configure this connection from dict *data*; adopt socket *link* if given.

    With link=None this is a user-created connection: validate the options
    and open it. With a socket in *link*, this object wraps a connection a
    TCP server just accepted, and *data* is a pre-built config copied from
    that server.
    """
    # Minimal state first so configure()/close() can safely test these keys.
    self._config = {"ready":False,"active":False}
    if link is None:
        self.configure(data)
        self.setup()
    else: # In case of connections made to TCP Server, TCP Client objects are created, with socket objects already available.
        self._config = data
        self._config["socket"] = link
        self._config["active"] = True
        # "info" is the restricted control interface handed to the handler;
        # "kill" closes the parent server, "close" only this client link.
        self._config["info"] = { "host":self._config["host"], "port":self._config["port"], "send":self.send, "close":self.close_internal, "kill":self._config["parent"].close_internal }
        # Call the handler once with data=None so it can set up its state.
        self._config["args"] = self._config["handler"](None,self._config["info"],self._config["args"])
        self._config["thread"] = self.spawn(self.info(),self.loop)
        self._config["ready"] = True
def __del__(self):
    "Terminates the connection."
    # Best-effort cleanup on garbage collection; close() is idempotent.
    self.close()
def debug(self,data):
    # Best-effort logging: write a timestamped line to the configured
    # "debug" sink. Any failure (sink unset, not callable, broken stream)
    # is deliberately swallowed -- logging must never break the connection.
    try: self._config["debug"](time.strftime("%d-%b-%Y %H:%M:%S",time.localtime())+" "+self._config["name"]+" : "+data+"\n")
    except: pass
def configure(self,data):
    """Validate dict *data* and install it as this connection's configuration.

    Required keys: name, host, port, role, type (and 'handler' unless the
    connection is a UDP client). Read-only keys (ready, active, socket,
    clients) may not be supplied. If the connection is currently active it
    is closed first and re-opened with the new configuration. Returns self.
    Raises ConnectionError with code 1 (missing option), 2 (read-only
    attribute) or 3 (invalid value).
    """
    if self._config["active"]:
        self.close()
        interrupt = True  # re-run setup() once reconfiguration is done
    else:
        interrupt = False
    # Defaults for every supported configuration variable.
    self._config = { }
    self._config["name"] = "Anonymous"   # default connection name
    self._config["ready"] = False        # not yet configured for operation
    self._config["active"] = False       # not listening/connected yet
    self._config["args"] = None          # state passed to/returned by the handler
    self._config["type"] = "tcp"         # transport layer protocol: tcp or udp
    self._config["family"] = "ipv4"      # internet layer protocol family
    self._config["maxconn"] = 1          # max queued client connections (servers)
    self._config["poll"] = 1             # polling interval in seconds
    self._config["buffer"] = 1024        # receive buffer size in bytes
    self._config["debug"] = None         # sink function for debug output
    self._config["parent"] = None        # spawned TCP clients: their source server
    self._config["link"] = []            # TCP servers: client Connections spawned
    self._config["clients"] = False      # has this server ever had a client?
    if type(data) is not dict: return self  # nothing to apply
    for key in ("name","host","port","role","type"):
        if key not in data: raise ConnectionError(self._config["name"],1,"Missing option '"+key+"'.")
    for key in data:
        if key in ("ready","active","socket","clients"): raise ConnectionError(self._config["name"],2,"Attempt to modify read-only attribute '"+key+"'.")
        if key in ("port","maxconn","poll","buffer"):
            try: self._config[key]=int(data[key])
            except ValueError: raise ConnectionError(self._config["name"],3,"Invalid value provided for '"+key+"' option.")
        # BUG FIXES vs original: string comparisons used "is" (identity,
        # unreliable); the family check used `not in ("ipv4")` which is a
        # *substring* test on the string "ipv4", not tuple membership.
        elif key == "role" and data[key] not in ("server","client"): raise ConnectionError(self._config["name"],3,"Invalid value provided for 'role' option.")
        elif key == "type" and data[key] not in ("tcp","udp"): raise ConnectionError(self._config["name"],3,"Invalid value provided for 'type' option.")
        elif key == "family" and data[key] not in ("ipv4",): raise ConnectionError(self._config["name"],3,"Invalid value provided for 'family' option.")
        else: self._config[key] = data[key]
    if self._config["type"] != "udp" or self._config["role"] != "client":
        # BUG FIX: original interpolated the undefined name `handler`
        # (NameError) instead of the literal option name.
        if "handler" not in data: raise ConnectionError(self._config["name"],1,"Missing option 'handler'.")
    self._config["ready"] = True
    self.debug("Configuration completed successfully : "+str(self._config))
    if interrupt: self.setup()
    return self
def initiate(self):
    "Sets up connections with the remote host, and getting them ready for data transfer."
    # NOTE: Python 2 syntax ("except socket.error, e") -- this module targets Python 2.
    if not self._config["ready"]: return self  # refuse to start before configure() succeeds
    try:
        family = socket.AF_INET  # only IPv4 is supported (see configure()'s family check)
        if self._config["type"] is "tcp" and self._config["role"] is "server":
            # TCP server: bind + listen; clients are accepted later in loop().
            self._config["socket"] = socket.socket(family,socket.SOCK_STREAM)
            self._config["socket"].bind( (self._config["host"],self._config["port"]) )
            self._config["socket"].listen(self._config["maxconn"])
            self.debug("TCP Server set up and listening at "+self._config["host"]+":"+str(self._config["port"])+".")
        elif self._config["type"] is "tcp" and self._config["role"] is "client":
            self._config["socket"] = socket.socket(family,socket.SOCK_STREAM)
            self._config["socket"].connect( (self._config["host"],self._config["port"]) )
            self.debug("TCP Client set up and connected to "+self._config["host"]+":"+str(self._config["port"])+".")
            # Give the handler its control interface before any data flows.
            self._config["info"] = { "host":self._config["host"], "port":self._config["port"], "send":self.send, "close":self.close_internal, "kill":self.close_internal }
            self._config["active"] = True
            self._config["args"] = self._config["handler"]( None, self._config["info"], self._config["args"] )
        elif self._config["type"] is "udp" and self._config["role"] is "server":
            self._config["socket"] = socket.socket(family,socket.SOCK_DGRAM)
            self._config["socket"].bind( (self._config["host"],self._config["port"]) )
            self.debug("UDP Server set up and listening at "+self._config["host"]+":"+str(self._config["port"])+".")
        elif self._config["type"] is "udp" and self._config["role"] is "client":
            # Datagram client: no connect needed; send() uses sendto().
            self._config["socket"] = socket.socket(family,socket.SOCK_DGRAM)
            self.debug("UDP Client set up to connect to "+self._config["host"]+":"+str(self._config["port"])+".")
            self._config["active"] = True
    # NOTE(review): as written, only the tcp-client/udp-client branches set
    # "active"; setup() only spawns loop() when active is True -- confirm
    # whether server branches were meant to set it too.
    except socket.error, e:
        # errno 10048 is Windows WSAEADDRINUSE (address already in use);
        # other socket errors are silently swallowed here -- confirm intended.
        if e.errno==10048: raise ConnectionError(self._config["name"],5,"Could not bind to "+self._config["type"].upper()+" port "+str(self._config["port"])+".")
    return self
def loop(self):
    "An infinite loop that waits for data to be recieved from the remote host, and calls a handler function when it arrives."
    # Runs in a background thread (started by setup()/__init__). Python 2 syntax.
    if self._config["type"] is "udp" and self._config["role"] is "client": return self  # UDP clients never receive
    try:
        while self._config["active"]: # Verify that no other process has tried to kill this _config
            if self._config["socket"] not in select.select([self._config["socket"]],[],[],1)[0]: # Verify that data is indeed available to read
                time.sleep( self._config["poll"] ); continue; # If not, wait for some time and recheck
            if self._config["type"] is "tcp" and self._config["role"] is "server":
                # New inbound client: clone our config and wrap the accepted
                # socket in a child Connection that runs its own loop().
                link,addr = self._config["socket"].accept()
                self.debug("Accepted TCP connection from "+addr[0]+":"+str(addr[1])+" at "+self._config["host"]+":"+str(self._config["port"])+".")
                data = dict( [(x,y) for x,y in self._config.items() if x not in ("ready","active","socket","link")] );
                data["name"]+=" - "+data["host"]+":"+str(data["port"]);
                data["parent"]=self; data["host"]=addr[0]; data["port"]=addr[1]; data["role"]="client";
                self._config["clients"] = True;
                self._config["link"].append( Connection(data,link) )
            elif self._config["type"] is "tcp" and self._config["role"] is "client":
                try: data = self._config["socket"].recv( self._config["buffer"] )
                except socket.error, e:
                    # errno 10054 is Windows WSAECONNRESET (reset by peer).
                    if e.errno==10054: raise ConnectionError(self._config["name"],6,"The connection has been terminated by the remote host "+self._config["host"]+":"+str(self._config["port"])+".")
                if not data: break  # remote closed the connection cleanly
                # Skip logging payloads the handler marked as binary.
                if type(self._config["args"]) is dict and ("binary" not in self._config["args"] or self._config["args"]["binary"] is False):
                    self.debug("Recieved data from TCP connection "+self._config["host"]+":"+str(self._config["port"])+" : "+data)
                self._config["args"] = self._config["handler"]( data , self._config["info"] , self._config["args"] );
            elif self._config["type"] is "udp" and self._config["role"] is "server":
                # Datagram server: dispatch each packet to the handler on its own thread.
                data,addr = self._config["socket"].recvfrom( self._config["buffer"] )
                self.debug("Accepted UDP data from "+addr[0]+":"+str(addr[1])+" : "+data)
                self.spawn(self.info()+" Handler", self._config["handler"], ( data , { "host":addr[0], "port":addr[1] } , self._config["args"] ) );
    except ConnectionError, e: pass  # a remote reset ends the loop like a normal shutdown
    # Tear-down: mark inactive, close the socket, notify TCP-client handlers.
    self._config["active"] = False
    self._config["socket"].close()
    if self._config["type"] is "tcp" and self._config["role"] is "client":
        self._config["handler"]( None, self._config["info"], self._config["args"] )
    self.debug("Terminated connection at "+self._config["host"]+":"+str(self._config["port"])+".")
    return self
def setup(self):
"Initiate the connection and starts listening for data sent by the remote host."
if self._config["active"]: return self
self.initiate()
if self._config["active"]: self._config["thread"] = self.spawn(self.info(),self.loop)
return self
def close(self):
    """Terminate the connection (and, for servers, every spawned client link).

    Blocks until the background loop thread has exited. Safe to call when
    already closed. Returns self.
    """
    if self._config["active"]:
        # Each child close() removes itself from "link", hence the while-loop.
        while("link" in self._config and len(self._config["link"])>0): self._config["link"][0].close()
        self._config["active"] = False # Disable the main loop
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # exists since Python 2.6 so this stays compatible with both.
        if self._config["thread"].is_alive(): self._config["thread"].join() # Wait till the current main loop cycle ends
        if self._config["parent"] is not None: self._config["parent"]._config["link"].remove(self) # Break link from parent to ensure destruction.
    return self
def close_internal(self): # Exists so that it is possible to terminate the loop thread from inside itself
    # Unlike close(), this does not join the thread -- joining our own
    # thread would deadlock; loop() exits at the end of the current cycle.
    self._config["active"] = False # Terminates main loop at the end of this cycle.
def send(self,data):
    "Sends given data to the remote host."
    # Client-only: servers cannot send (their socket is a listener).
    # Python 2 syntax ("except socket.error, e").
    if len(data)==0: return  # nothing to send; NOTE: returns None, not self
    if self._config["active"]:
        if self._config["role"] is "client":
            if self._config["type"] is "tcp":
                try: self._config["socket"].send(data)
                except socket.error, e:
                    # errno 10054 is Windows WSAECONNRESET (reset by peer).
                    if e.errno==10054: raise ConnectionError(self._config["name"],6,"The connection has been terminated by the remote host "+self._config["host"]+":"+str(self._config["port"])+".")
            elif self._config["type"] is "udp": self._config["socket"].sendto(data, (self._config["host"],self._config["port"]) )
            # Skip logging payloads the handler marked as binary.
            if type(self._config["args"]) is dict and ("binary" not in self._config["args"] or self._config["args"]["binary"] is False):
                self.debug("Sent data to "+self._config["host"]+":"+str(self._config["port"])+" : "+data)
        else: self.debug("Could not send data as this is a Server : "+data)
    else: self.debug("Could not send data to "+self._config["host"]+":"+str(self._config["port"])+" as this connection is no longer active : "+data)
    return self
def wait(self):
    "Suspends execution till the currently established connection terminated."
    # Joins the background loop thread; blocks the caller indefinitely.
    self._config["thread"].join()
    return self
def info(self):
    """Return a short description, e.g. "TCP-Server localhost:80 Anonymous"."""
    cfg = self._config
    role = cfg["role"][0].upper() + cfg["role"][1:].lower()
    return "%s-%s %s:%s %s" % (cfg["type"].upper(), role, cfg["host"], str(cfg["port"]), cfg["name"])
def active(self):
    "Returns a boolean value indicating whether or not the current connection is active."
    return self._config["active"]
def clients(self): # Returns whether or not this TCP server has actively connected clients.
    # Set once a client ever connects (see loop()); never reset to False.
    return self._config["clients"]
def __str__(self):
    # Debug-friendly representation, e.g. "[Connection X : tcp server at h:80]".
    return "[Connection %s : %s %s at %s:%s]" % (self._config["name"],self._config["type"],self._config["role"],self._config["host"],self._config["port"])
# Manual test harness (Python 2: print statements, raw_input).
if __name__=="__main__":
    # Handlers passed to Connection: called with (data, info, args).
    def tcpc(data,info,args):
        if data is not None: print "Recieved Data :", data
    def udps(data,info,args):
        if data is not None: print data
    x = int(raw_input("Enter Connection Type (1=TCPS, 2=TCPC, 3=UDPS, 4=UDPC) : "))
    if x>0 and x<5:
        port = raw_input("Enter the source/client port number : ")
    else:
        # NOTE(review): this branch's port1/port2 are never used below;
        # with x outside 1-4 no Connection is created at all -- confirm.
        #port1 = raw_input("Enter server port number : ")
        #port2 = raw_input("Enter client port number : ")
        port1,port2 = 81,80
    debug = sys.stdout
    try:
        if x==1:
            # Server: wait() blocks until the connection is closed.
            c = Connection({"name":"TCPS","host":"localhost","port":port,"role":"server","type":"tcp","handler":tcpc,"debug":debug}).wait()
        elif x==2:
            # Client: read lines from stdin and send them until disconnected.
            c = Connection({"name":"TCPC","host":"localhost","port":port,"role":"client","type":"tcp","handler":tcpc,"debug":debug})
            while c.active():
                print "Send Data :",
                c.send(raw_input()+"\n")
        elif x==3:
            c = Connection({"name":"UDPS","host":"localhost","port":port,"role":"server","handler":udps,"type":"udp"}).wait()
        elif x==4:
            c = Connection({"name":"UDPC","host":"localhost","port":port,"role":"client","type":"udp"})
            while c.active(): c.send(raw_input())
    except KeyboardInterrupt: c.close()
|
P2PChat-UI.py | #!/usr/bin/python3
# Student name and No.:
# Student name and No.:
# Development platform:
# Python version: 3.6
# Version:1
##Hi
from tkinter import *
import sys
import socket
import threading
import time
import select
#
# Global variables
#
# --- user state ---
myUsername = ""              # nickname registered via the User button
myHashID = None              # sdbm hash of username+IP+port (peer identity)
myAddress = 'localhost'      # this peer's address
myPort = 32341               # this peer's TCP/UDP port (overridden by argv[3])
isJoined = False #to check whether the user has already joined a chatroom
keepAlive = True             # keep_alive() thread runs while this is True
roomMemberDict = dict() #data structure = myUsername: (ip, port)
# --- chatroom state ---
roomMemberHash = None        # hash of the member list from the server's 'M' reply
joinedRoomName = ""
# --- threading ---
keep_alive_thread = None
server_thread = None
connection_thread = None
# --- sockets / protocol ---
msgID = 0                    # sequence number of messages this peer originates
serverPort = 32340           # room server port (overridden by argv[2])
serverAddress = "localhost"  # room server address (overridden by argv[1])
socketfd = None              # TCP socket to the room server
forwardLinkedMember = ()     # (username, socket) of our forward link
backwardLinkedMemberDict = dict() #name: socket
socketUdpReceiver = None     # UDP socket for poke ('K'/'A') messages
socketTCPServer = None       # listening TCP socket for incoming peer links
RList = []                   # sockets monitored for reading in run_server()
WList = []                   # sockets registered as writable peers
# --- message bookkeeping ---
messageCounter = dict()      # username -> last message ID seen (duplicate filter)
#
# This is the hash function for generating a unique
# Hash ID for each peer.
# Source: http://www.cse.yorku.ca/~oz/hash.html
#
# Concatenate the peer's myUsername, str(IP address),
# and str(Port) to form a string that be the input
# to this hash function
#
def sdbm_hash(instr):
hash = 0
for c in instr:
hash = int(ord(c)) + (hash << 6) + (hash << 16) - hash
return hash & 0xffffffffffffffff
#
# Functions to handle user input
#
def do_User():
    """Register or change the user's nickname (only allowed before joining a room)."""
    global myUsername, myHashID, myPort,myAddress
    tempName = userentry.get()
    outstr = "\n[User] username: "
    if isJoined or len(tempName) == 0:
        # Rejections: already in a room takes precedence over an empty entry.
        outstr += "Already joined a chatroom. Rejected" if isJoined else "Empty entry. Rejected"
    elif not myUsername:
        # First registration: derive the peer's hash identity as well.
        myUsername = str(tempName)
        outstr += myUsername
        myHashID = sdbm_hash(myUsername + myAddress + str(myPort))
    elif tempName == myUsername:
        outstr += "Remains the same as " + myUsername
    else:
        oldName = myUsername
        myUsername = str(tempName)
        outstr += "Changed from " + oldName + " to " + myUsername
        # NOTE(review): myHashID is not recomputed on rename -- confirm intended.
    CmdWin.insert(1.0, outstr)
    userentry.delete(0, END)
def do_List():
    """Ask the room server for the chatroom list ('L' request) and show it in CmdWin."""
    CmdWin.insert(1.0, "\nPress List")
    # create socket and connect to Comm_pipe (lazily, reused afterwards)
    global socketfd
    if not socketfd:
        socketfd = socket.socket()
        try:
            socketfd.connect((serverAddress, serverPort))
            print("My socket address is ", socketfd.getsockname())
        except socket.error as err:
            print("Connection error: " ,err)
            sys.exit(1)  # NOTE(review): exits the whole app on connect failure -- confirm
    # send the LIST request per the room-server protocol
    msg = "L::\r\n"
    try:
        socketfd.sendall(msg.encode('ascii'))
    except socket.error as err:
        print("Sending error: ", err)
    # receive the message
    # NOTE(review): assumes the full reply fits in one 1024-byte recv -- confirm.
    rmsg = socketfd.recv(1024)
    dmsg = rmsg.decode('ascii')
    names = dmsg[2:-4].split(":")  # strip the 2-char type prefix and trailing "::\r\n"
    outstr = ""
    for name in names:
        if name:
            outstr += name + ","
    CmdWin.insert(1.0, "\n[List] list of chatrooms: " + outstr)
def do_Join():
    """Join (or create) the chatroom named in the entry box and start helper threads."""
    global isJoined,socketfd,joinedRoomName, keepAlive,keep_alive_thread,roomMemberDict,roomMemberHash,server_thread
    CmdWin.insert(1.0, "\nPress JOIN")
    # Guard clauses: need a username, must not already be joined, need a room name.
    if not myUsername:
        CmdWin.insert(1.0, "\n[JOIN] Request rejected.You should register a username first.")
        return
    if isJoined:
        CmdWin.insert(1.0, "\n[JOIN] Request rejected.You have already joined a chatroom.")
        return
    #get the target chatroom name from the entry
    roomName = userentry.get()
    if not roomName:
        CmdWin.insert(1.0, "\n[JOIN] Request rejected. Please enter your target chatroom name.")
        return
    rmsg = joinRoom(roomName)
    # if the message is successfully received
    if rmsg:
        if(rmsg.decode("ascii")[0]=='M'):  # 'M' reply carries the membership list
            startServer()  # begin listening for pokes and incoming peer links
            joinedRoomName = roomName #save the roomname to joinedRoomName
            roomMemberHash, membersInfo = decodeResponse(rmsg)
            roomMemberDict = create_member_record(membersInfo) #this is where we store the actual member info
            if (len(roomMemberDict)>1):
                create_forward_link()  # hook into the existing peer ring
            else:
                # We are the first member: this JOIN created the room.
                CmdWin.insert(1.0, "\n[JOIN]"+ " Chatroom " + joinedRoomName + " created")
                print("Chatroom " + joinedRoomName + " created")
            isJoined = True
            keepAlive = True
            startConnectionHandler()  # watchdog that repairs the forward link
            startKeepAlive()          # periodic re-registration with the server
    userentry.delete(0, END)
def startKeepAlive():
    """Start the daemon thread that periodically re-registers with the server.

    BUG FIX: the original assigned keep_alive_thread as a *local*, so the
    module-level keep_alive_thread stayed None; declare it global.
    """
    global keep_alive_thread
    keep_alive_thread = threading.Thread(target=keep_alive)
    keep_alive_thread.daemon = True
    keep_alive_thread.start()
def startServer():
    """Start the daemon thread running run_server (UDP pokes + TCP peer links).

    BUG FIX: the original assigned server_thread as a *local*, so the
    module-level server_thread stayed None; declare it global.
    """
    global server_thread
    server_thread = threading.Thread(target=run_server)
    server_thread.daemon = True
    server_thread.start()
def decodeResponse(rmsg):
    """Parse a protocol message (ascii bytes) of the form 'X:field:...::\\r\\n'.

    The returned tuple depends on the message type X:
    M -> (room hash, member list), A -> (roomname, sender),
    P -> 5 handshake fields, T -> 6 fields incl. the text payload,
    S -> message id, K -> (roomname, sender). Unknown types return None.
    """
    dmsg = rmsg.decode('ascii')
    # Strip the 2-char "X:" prefix and the trailing "::\r\n".
    fields = dmsg[2:-4].split(":")
    msg_type = dmsg[0]
    print(msg_type)
    if msg_type == 'M':
        return fields[0], fields[1:]
    if msg_type == 'A':
        return fields[0], fields[1]
    if msg_type == 'P':
        return fields[0], fields[1], fields[2], fields[3], fields[4]
    if msg_type == 'T':
        # The payload length (fields[4]) locates the text at the tail,
        # so message content containing ':' is preserved intact.
        length = fields[4]
        content = dmsg[-(int(length) + 4):-4]
        return fields[0], fields[1], fields[2], fields[3], length, content
    if msg_type == 'S':
        return fields[0]
    if msg_type == 'K':
        return fields[0], fields[1]
def updateMemberList():
    """Re-query the server for the member list; refresh the cache if the hash changed."""
    res = joinRoom(joinedRoomName)
    global roomMemberDict
    if res:
        tempHash, tempMember = decodeResponse(res)
        if (tempHash != roomMemberHash):
            # NOTE(review): roomMemberHash itself is not updated here (only
            # keep_alive() updates it) -- confirm intended.
            roomMemberDict = create_member_record(tempMember)
def joinRoom(roomName):
    """Send a JOIN ('J') request for *roomName* and return the raw reply bytes.

    Returns None when the server sends an empty reply. Also used by
    keep_alive()/updateMemberList() as the periodic re-registration message.
    """
    global socketfd
    if not socketfd:
        # Lazily open the (shared) TCP connection to the room server.
        socketfd = socket.socket()
        try:
            socketfd.connect((serverAddress, serverPort))
        except socket.error as err:
            print("Connection error: " ,err)
            sys.exit(1)  # NOTE(review): exits the whole app on connect failure -- confirm
    # send the message: J:<room>:<user>:<ip>:<port>::\r\n
    rm = ":" + roomName
    un = ":" + myUsername
    ip = ":" + myAddress
    pt = ":" + str(myPort)
    msg = "J" + rm + un + ip + pt + "::\r\n"
    try:
        socketfd.sendall(msg.encode('ascii'))
    except socket.error as err:
        print("Sending error: ", err)
    # try to receive the message
    rmsg = socketfd.recv(1024)
    print(len(rmsg))
    if (len(rmsg)>0):
        return rmsg
    else:
        return None
def create_forward_link():
    """Pick the next peer after ourselves in hash order and try to handshake with it.

    Members are sorted by their sdbm hash to form a logical ring; we walk
    forward from our own position, skipping peers already backward-linked
    to us, until one handshake succeeds.
    """
    tempList = list()
    for user in roomMemberDict.items():
        username = user[0]
        userIp = user[1][0]
        userPort = user[1][1]
        userHash = sdbm_hash(str(username) + str(userIp) + str(userPort))
        tempList.append((userHash,str(username), str(userIp), int(userPort)))
    tempList.sort(key=lambda tup: tup[0]) #sort by userHash
    print(tempList)
    # Start from the entry just after ourselves in the sorted ring.
    start = (tempList.index((myHashID,myUsername,myAddress,myPort)) + 1) % len(tempList)
    while (tempList[start][0] != myHashID):
        if(tempList[start][1] in backwardLinkedMemberDict):
            start = (start + 1) % len(tempList)  # already linked to us backward; skip
        else:
            try:
                if (handshake(tempList[start])):
                    print("Forward link successfully established. You are connected.")
                    return
                else:
                    start = (start + 1) % len(tempList)
            except socket.error as err:
                # NOTE(review): on socket.error 'start' is unchanged, so the
                # same peer is retried immediately (possible busy loop) -- confirm.
                print(err)
    print(forwardLinkedMember)
    if (not forwardLinkedMember):
        print ("ERROR: Cannot establish TCP connection with Peer. System will start the connection procedure later")
    #TO-DO: reschedule the connection (handled meanwhile by connectionHandling())
def startConnectionHandler():
    """Start the daemon watchdog thread running connectionHandling.

    BUG FIX: the original assigned connection_thread as a *local*, so the
    module-level connection_thread stayed None; declare it global.
    """
    global connection_thread
    connection_thread = threading.Thread(target=connectionHandling)
    connection_thread.daemon = True
    connection_thread.start()
def connectionHandling():
    """Watchdog loop (1s period): drop a dead forward link and rebuild it while joined."""
    #it runs only when it has joined a room
    global forwardLinkedMember
    while isJoined:
        if(len(roomMemberDict)>1):
            if (forwardLinkedMember):
                # Forward-link owner left the room: forget the link so the
                # next iteration re-establishes one.
                if(forwardLinkedMember[0] not in roomMemberDict):
                    forwardLinkedMember = None
                    print("WARNING: Your forward link has dismissed.")
            else:
                #user is disconnected
                print("WARNING: You have to connect to one forward link to make sure the system is working. System is reconnecting...")
                create_forward_link()
        time.sleep(1)
def handshake(user):
    """Try to establish our forward link to *user* = (hash, name, ip, port).

    Sends the 'P' handshake and waits for the 'S:<msgID>' reply; on success
    records the socket as forwardLinkedMember and returns True. Returns
    False on any socket error.
    """
    # create socket and connect to Comm_pipe
    global forwardLinkedMember,RList,WList
    tempSocket = socket.socket()
    try:
        print(user)
        tempSocket.connect((user[2], int(user[3])))
        msg = 'P:' + joinedRoomName + ':' + myUsername+':' + myAddress +':' + str(myPort) +':' + str(msgID)+'::\r\n'
        tempSocket.sendall(msg.encode("ascii"))
        try:
            # receive the message ('S' reply carries the peer's last message ID)
            rmsg = tempSocket.recv(50)
            mID = decodeResponse(rmsg)
            RList.append(tempSocket)
            WList.append(tempSocket)
            forwardLinkedMember = (user[1], tempSocket) # use username as the key to find the socket
            messageCounter[str(user[1])] = int(mID)
            return True
        except socket.error as err:
            print("Connection error: " ,err)
    except socket.error as err:
        print("Connection error: " ,err)
    return False
def create_member_record(raw_data_string):
    """Build {username: (ip, port)} from the flat server list and announce changes.

    *raw_data_string* is [name, ip, port, name, ip, port, ...]. Also prunes
    backward links whose peers are no longer members of the room.
    """
    tempRecord = dict()
    outstr = ''
    for i in range(0,len(raw_data_string)-2,3):
        tempRecord[raw_data_string[i]] = (raw_data_string[i+1],int(raw_data_string[i+2])) # the data structure is a dictionary of tuples
        #print(tempRecord[raw_data_string[i]])
        outstr += raw_data_string[i] + ', '
    if(not roomMemberDict):
        # First population: simply list everybody.
        CmdWin.insert(1.0, "\nMembers in the room: " + outstr)
    else:
        # Diff old vs new membership to announce leaves and joins.
        removedMember = roomMemberDict.items() - tempRecord.items()
        for member in removedMember:
            CmdWin.insert(1.0, "\n" + member[0] + " has left the room")
        newMember = tempRecord.items() - roomMemberDict.items()
        for member in newMember:
            CmdWin.insert(1.0, "\n" + member[0] + " has joined the room")
    global backwardLinkedMemberDict
    if(backwardLinkedMemberDict):
        # Keep only backward links whose owner is still in the room.
        tempBackward = dict()
        for peer in backwardLinkedMemberDict.items():
            if (peer[0] in tempRecord):
                tempBackward[peer[0]] = peer[1]
        backwardLinkedMemberDict = tempBackward
    return tempRecord
def keep_alive():
    """Every 20s re-send the JOIN to keep our registration alive; refresh members on change."""
    global roomMemberHash, roomMemberDict
    while True:
        if not keepAlive:
            return  # do_Quit clears the flag to stop this thread
        rmsg = joinRoom(joinedRoomName)
        if rmsg:
            dmsg = rmsg.decode('ascii')
            raw_data_string = dmsg[2:-4].split(":")
            if roomMemberHash == raw_data_string[0]:
                pass  # membership unchanged (hash matches)
                #CmdWin.insert(1.0, "\nNo new member in the chatroom.")
            else:
                roomMemberHash = raw_data_string[0]
                roomMemberDict = create_member_record(raw_data_string[1:])
            dmsg = rmsg.decode('ascii')
            print(dmsg)
        time.sleep(20)
def isConnected():
    # Truthy when we have a forward link or at least one backward link.
    # Returns the object itself, not a bool -- callers only use truthiness.
    return forwardLinkedMember or backwardLinkedMemberDict
def do_Send():
    """Broadcast the entry-box text to the room as a 'T' message and echo it locally."""
    msg = userentry.get()
    global myUsername, myHashID, myPort,myAddress,msgID
    if (len(msg) > 0):
        if(isJoined):
            if(isConnected()):
                msgID += 1  # new sequence number for this originated message
                # T:<room>:<originHash>:<origin>:<msgID>:<len>:<text>::\r\n
                fullMsg = "T:" + joinedRoomName + ":" + str(myHashID) + ":" + myUsername + ":" + str(msgID) + ":" + str(len(msg)) +":" + msg + "::\r\n"
                sendMessge(fullMsg)
                MsgWin.insert(1.0, "\n" +myUsername + ": " + msg)
            else:
                CmdWin.insert(1.0, "\nYou are disconnected right now. Can't send any message")
        else:
            CmdWin.insert(1.0, "\nPlease join a chatroom")
    else:
        CmdWin.insert(1.0, "\nERROR: Empty entry. Rejected")
    userentry.delete(0, END)
def do_Poke():
    """Send a UDP 'K' poke to the named member and wait (2s) for the 'A' ack."""
    global roomMemberDict
    #CmdWin.insert(1.0, "\nPress Poke")
    target = userentry.get()
    if isJoined:
        if target:
            if target != myUsername:
                if target in roomMemberDict:
                    print("found it!")
                    # One-shot UDP socket; roomMemberDict[target] is the (ip, port).
                    temp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    msg = 'K:' + joinedRoomName + ':' + myUsername+'::\r\n'
                    temp_socket.sendto(msg.encode('ascii'),roomMemberDict[target])
                    temp_socket.settimeout(2)
                    try:
                        rmsg, peerAddress = temp_socket.recvfrom(64)
                        MsgWin.insert(1.0, "\n" + target + " has received your poke;)")
                    except socket.timeout:
                        CmdWin.insert(1.0, "\nDid not receive ACK from the peer.")
                    temp_socket.close()
                else:
                    CmdWin.insert(1.0, "\nERROR: Nickname not found!.")
            else:
                CmdWin.insert(1.0, "\nError: You can't poke youself.")
        else:
            # No name entered: show the list of valid nicknames to poke.
            CmdWin.insert(1.0, "\nError: Please enter a valid name.")
            outstr = ''
            for member in roomMemberDict:
                outstr += str(member) + ', '
            CmdWin.insert(1.0, "\nValid name are: " + outstr)
    else:
        CmdWin.insert(1.0, "\nError: Please join a chatroom first.")
    userentry.delete(0, END)
def run_server():
    """Peer server loop: accept peer TCP links, answer UDP pokes, relay 'T' messages.

    Binds one UDP and one TCP socket to (myAddress, myPort) and multiplexes
    all peer sockets with select(); runs on a daemon thread (startServer).
    """
    global RList, WList, socketUdpReceiver, socketTCPServer
    socketUdpReceiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    socketUdpReceiver.bind((myAddress,myPort))
    socketTCPServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socketTCPServer.settimeout(1.0)
    try:
        socketTCPServer.bind((myAddress,myPort))
        print("TCP server listening...")
    except socket.error as msg:
        # NOTE(review): execution continues after a bind failure -- confirm.
        print("Socket Bind Error: " + str(msg))
    # set socket listening queue
    socketTCPServer.listen(5)
    RList.append(socketUdpReceiver)
    RList.append(socketTCPServer)
    # start the main loop
    while True:
        # use select to wait for any incoming connection requests or
        # incoming messages or 10 seconds
        try:
            Rready, Wready, Eready = select.select(RList, [], [], 10)
        except select.error as emsg:
            print("At select, caught an exception:", emsg)
            sys.exit(1)
        except KeyboardInterrupt:
            print("At select, caught the KeyboardInterrupt")
            sys.exit(1)
        # if has incoming activities
        if Rready:
            # for each socket in the READ ready list
            for sd in Rready:
                # if the listening socket is ready
                # that means a new connection request
                # accept that new connection request
                # add the new client connection to READ socket list
                # add the new client connection to WRITE socket list
                if sd == socketUdpReceiver:
                    # UDP datagram: a 'K' poke; reply with the 'A' ack.
                    (rmsg, peerAddress) = socketUdpReceiver.recvfrom(64)
                    rmName, senderName = decodeResponse(rmsg)
                    msg = b'A::\r\n'
                    MsgWin.insert(1.0, "\n" + senderName + " just poke you;)")
                    socketUdpReceiver.sendto(msg,peerAddress)
                elif sd == socketTCPServer:
                    newfd, caddr = socketTCPServer.accept()
                    print("TCP server receive connection request")
                    RList.append(newfd)
                    WList.append(newfd)
                else:
                    # An established peer socket has data: 'P' handshake or 'T' text.
                    rmsg = sd.recv(500)
                    if rmsg:
                        if (rmsg.decode("ascii")[0] == 'P'):
                            # Handshake: verify the peer is a member, then
                            # register a backward link and reply S:<msgID>.
                            roomname, username, userip, userport, mID = decodeResponse(rmsg)
                            updateMemberList()
                            global msgID
                            if(username in roomMemberDict):
                                msg = "S:" + str(msgID) + "::\r\n"
                                backwardLinkedMemberDict[str(username)] = sd
                                messageCounter[str(username)] = int(mID)
                                print("Backward link is successfully established")
                                sd.sendall(msg.encode("ascii"))
                            else:
                                sd.close()
                        elif (rmsg.decode("ascii")[0] == 'T'):
                            roomname, originHID, origin_username, mID, msgLength, content = decodeResponse(rmsg)
                            if (roomname == joinedRoomName):
                                if(origin_username not in roomMemberDict):
                                    updateMemberList()
                                # Duplicate suppression: only display/forward a
                                # message whose ID differs from the last one seen.
                                if(str(origin_username) in messageCounter):
                                    if(messageCounter[str(origin_username)] != int(mID)):
                                        messageCounter[str(origin_username)] = int(mID)
                                        MsgWin.insert(1.0, "\n" + origin_username + ": " + content)
                                        forwardMessage(originHID, origin_username, rmsg)
                                else:
                                    messageCounter[str(origin_username)] = int(mID)
                                    MsgWin.insert(1.0, "\n" + origin_username + ": " + content)
                                    forwardMessage(originHID, origin_username, rmsg)
                            else:
                                print("ERROR: Received message from a person out of the room")
                                CmdWin.insert(1.0, "ERROR: Received message from a person out of the room")
                    else:
                        # Empty recv: the peer closed its end of the link.
                        print("A client connection is broken!!")
                        WList.remove(sd)
                        RList.remove(sd)
        # else did not have activity for 10 seconds,
        # just print out "Idling"
        else:
            print("Server Idling")
def sendMessge(msg):
    """Send text *msg* (ascii) to the forward link and every backward-linked peer.

    Peers whose pipe is already broken are silently skipped.
    """
    payload = msg.encode("ascii")
    if forwardLinkedMember:
        forwardLinkedMember[1].sendall(payload)
    for peer_name in backwardLinkedMemberDict:
        try:
            backwardLinkedMemberDict[peer_name].sendall(payload)
        except BrokenPipeError:
            continue
def forwardMessage(originHID, origin_username, rmsg):
    """Relay raw message bytes *rmsg* onward, skipping the originating peer."""
    if forwardLinkedMember and forwardLinkedMember[0] != origin_username:
        forwardLinkedMember[1].sendall(rmsg)
    origin = str(origin_username)
    for peer_name, peer_sock in backwardLinkedMemberDict.items():
        if peer_name != origin:
            try:
                peer_sock.sendall(rmsg)
            except BrokenPipeError:
                continue
def do_Quit():
    """Shut down: stop the keep-alive thread, close every socket, exit the process."""
    CmdWin.insert(1.0, "\nPress Quit")
    # BUG FIX: isJoined was missing from the global declaration, so the
    # assignment below created a dead local and the module-level isJoined
    # stayed True (connectionHandling's loop never saw the shutdown).
    global keepAlive, keep_alive_thread, isJoined
    keepAlive = False
    isJoined = False
    #if keep_alive_thread:
    #    keep_alive_thread.join()
    if (forwardLinkedMember):
        forwardLinkedMember[1].close()
    if (backwardLinkedMemberDict):
        for peer in backwardLinkedMemberDict.items():
            peer[1].close()
    if(socketUdpReceiver):
        socketUdpReceiver.close()
    if(socketTCPServer):
        socketTCPServer.close()
    sys.exit(0)
#
# Set up of Basic UI
#
win = Tk()
win.title("MyP2PChat")
#Top Frame for Message display (chat messages, red text, with scrollbar)
topframe = Frame(win, relief=RAISED, borderwidth=1)
topframe.pack(fill=BOTH, expand=True)
topscroll = Scrollbar(topframe)
MsgWin = Text(topframe, height='15', padx=5, pady=5, fg="red", exportselection=0, insertofftime=0)
MsgWin.pack(side=LEFT, fill=BOTH, expand=True)
topscroll.pack(side=RIGHT, fill=Y, expand=True)
MsgWin.config(yscrollcommand=topscroll.set)
topscroll.config(command=MsgWin.yview)
#Top Middle Frame for buttons (each wired to its do_* handler above)
topmidframe = Frame(win, relief=RAISED, borderwidth=1)
topmidframe.pack(fill=X, expand=True)
Butt01 = Button(topmidframe, width='6', relief=RAISED, text="User", command=do_User)
Butt01.pack(side=LEFT, padx=8, pady=8);
Butt02 = Button(topmidframe, width='6', relief=RAISED, text="List", command=do_List)
Butt02.pack(side=LEFT, padx=8, pady=8);
Butt03 = Button(topmidframe, width='6', relief=RAISED, text="Join", command=do_Join)
Butt03.pack(side=LEFT, padx=8, pady=8);
Butt04 = Button(topmidframe, width='6', relief=RAISED, text="Send", command=do_Send)
Butt04.pack(side=LEFT, padx=8, pady=8);
Butt06 = Button(topmidframe, width='6', relief=RAISED, text="Poke", command=do_Poke)
Butt06.pack(side=LEFT, padx=8, pady=8);
Butt05 = Button(topmidframe, width='6', relief=RAISED, text="Quit", command=do_Quit)
Butt05.pack(side=LEFT, padx=8, pady=8);
#Lower Middle Frame for User input (shared entry box read by all handlers)
lowmidframe = Frame(win, relief=RAISED, borderwidth=1)
lowmidframe.pack(fill=X, expand=True)
userentry = Entry(lowmidframe, fg="blue")
userentry.pack(fill=X, padx=4, pady=4, expand=True)
#Bottom Frame for displaying action info (command feedback / errors)
bottframe = Frame(win, relief=RAISED, borderwidth=1)
bottframe.pack(fill=BOTH, expand=True)
bottscroll = Scrollbar(bottframe)
CmdWin = Text(bottframe, height='15', padx=5, pady=5, exportselection=0, insertofftime=0)
CmdWin.pack(side=LEFT, fill=BOTH, expand=True)
bottscroll.pack(side=RIGHT, fill=Y, expand=True)
CmdWin.config(yscrollcommand=bottscroll.set)
bottscroll.config(command=CmdWin.yview)
def main():
    """Parse command-line arguments and start the Tk main loop.

    Usage: P2PChat.py <server address> <server port no.> <my port no.>
    Non-numeric port arguments fall back to the built-in defaults.
    """
    if len(sys.argv) != 4:
        print("P2PChat.py <server address> <server port no.> <my port no.>")
        sys.exit(2)
    global serverPort, serverAddress, myPort
    # argv[1] always exists after the length check; the original wrapped it
    # in a pointless try/except too.
    serverAddress = sys.argv[1]
    # BUG FIX: the original used bare `except:` clauses, which also swallow
    # SystemExit/KeyboardInterrupt; only a ValueError from int() is expected.
    try:
        serverPort = int(sys.argv[2])
    except ValueError:
        serverPort = 32340
    try:
        myPort = int(sys.argv[3])
    except ValueError:
        myPort = 32341
    win.mainloop()
# Script entry point.
if __name__ == "__main__":
    main()
|
tornado.py | import asyncio
import fnmatch
import json
import logging
import os
import threading
import time
import webbrowser
from functools import partial
from typing import Dict
from urllib.parse import urlparse
import tornado
import tornado.httpserver
import tornado.ioloop
from tornado.web import StaticFileHandler
from tornado.websocket import WebSocketHandler
from . import page
from .remote_access import start_remote_access_service
from .page import make_applications, render_page
from .utils import cdn_validation, deserialize_binary_event, print_listen_address
from ..session import CoroutineBasedSession, ThreadBasedSession, ScriptModeSession, \
register_session_implement_for_target, Session
from ..session.base import get_session_info_from_headers
from ..utils import get_free_port, wait_host_port, STATIC_PATH, iscoroutinefunction, isgeneratorfunction, \
check_webio_js, parse_file_size, random_str, LRUDict
logger = logging.getLogger(__name__)
_ioloop = None
def set_ioloop(loop):
    """Record the IOLoop running the Tornado server in the module-level slot."""
    global _ioloop
    _ioloop = loop
def ioloop() -> tornado.ioloop.IOLoop:
    """Return the IOLoop that is running the Tornado server.

    This method is currently only used when displaying a bokeh app.
    """
    global _ioloop
    return _ioloop
def _check_origin(origin, allowed_origins, handler: WebSocketHandler):
    """Accept same-site origins, or any origin matching one of the allowed glob patterns."""
    if _is_same_site(origin, handler):
        return True
    for pattern in allowed_origins:
        if fnmatch.fnmatch(origin, pattern):
            return True
    return False
def _is_same_site(origin, handler: WebSocketHandler):
    """Return True when *origin*'s netloc equals the request's Host header.

    The comparison includes the port; the origin is lower-cased while the
    Host header is compared as received (matching the original behavior).
    """
    origin_host = urlparse(origin).netloc.lower()
    request_host = handler.request.headers.get("Host")
    # Check to see that origin matches host directly, including ports
    return origin_host == request_host
def _webio_handler(applications=None, cdn=True, reconnect_timeout=0, check_origin_func=_is_same_site):  # noqa: C901
    """Build a Tornado WebSocket handler class serving the given PyWebIO apps.

    :param dict applications: dict of `name -> task function`
    :param bool/str cdn: Whether to load front-end static resources from CDN
    :param int reconnect_timeout: seconds within which a disconnected client
        may reconnect before its session is reclaimed; 0 disables reconnection
    :param callable check_origin_func: check_origin_func(origin, handler) -> bool
    :return: Tornado RequestHandler class
    """
    check_webio_js()
    if applications is None:
        applications = dict(index=lambda: None)  # mock PyWebIO app

    class WSHandler(WebSocketHandler):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Set by close_from_session() so on_close() does not close the
            # session a second time.
            self._close_from_session = False
            self.session_id = None
            self.session = None  # type: Session
            # Start the class-wide expired-session cleanup task exactly once.
            if reconnect_timeout and not type(self)._started_clean_task:
                type(self)._started_clean_task = True
                tornado.ioloop.IOLoop.current().call_later(reconnect_timeout // 2, type(self).clean_expired_sessions)
                logger.debug("Started session clean task")

        def get_app(self):
            # Select the app via the `app` query argument; fall back to 'index'.
            app_name = self.get_query_argument('app', 'index')
            app = applications.get(app_name) or applications['index']
            return app

        async def get(self, *args, **kwargs) -> None:
            # It's a simple http GET request
            if self.request.headers.get("Upgrade", "").lower() != "websocket":
                # Backward compatible
                # Frontend detect whether the backend is http server
                if self.get_query_argument('test', ''):
                    return self.write('')
                # Plain GET: serve the rendered page instead of upgrading.
                app = self.get_app()
                html = render_page(app, protocol='ws', cdn=cdn)
                return self.write(html)
            else:
                await super().get()

        def check_origin(self, origin):
            return check_origin_func(origin=origin, handler=self)

        def get_compression_options(self):
            # Non-None enables compression with default options.
            return {}

        @classmethod
        def clean_expired_sessions(cls):
            # Re-schedule itself so the cleanup keeps running periodically.
            tornado.ioloop.IOLoop.current().call_later(reconnect_timeout // 2, cls.clean_expired_sessions)
            while cls._session_expire:
                session_id, expire_ts = cls._session_expire.popitem(last=False)  # pop the earliest-expiring session
                if time.time() < expire_ts:
                    # this session is not expired
                    cls._session_expire[session_id] = expire_ts  # restore this item
                    cls._session_expire.move_to_end(session_id, last=False)  # move to front
                    break
                # clean this session
                logger.debug("session %s expired" % session_id)
                cls._connections.pop(session_id, None)
                session = cls._webio_sessions.pop(session_id, None)
                if session:
                    session.close(nonblock=True)

        @classmethod
        def send_msg_to_client(cls, _, session_id=None):
            conn = cls._connections.get(session_id)
            session = cls._webio_sessions[session_id]
            # No live connection: commands stay queued inside the session.
            if not conn or not conn.ws_connection:
                return
            for msg in session.get_task_commands():
                try:
                    conn.write_message(json.dumps(msg))
                except TypeError as e:
                    logger.exception('Data serialization error: %s\n'
                                     'This may be because you pass the wrong type of parameter to the function'
                                     ' of PyWebIO.\nData content: %s', e, msg)

        @classmethod
        def close_from_session(cls, session_id=None):
            # Flush any remaining commands before tearing the connection down.
            cls.send_msg_to_client(None, session_id=session_id)
            conn = cls._connections.pop(session_id, None)
            cls._webio_sessions.pop(session_id, None)
            if conn and conn.ws_connection:
                conn._close_from_session = True
                conn.close()

        # Class-wide bookkeeping shared by every connection of this handler.
        _started_clean_task = False
        _session_expire = LRUDict()  # session_id -> expire timestamp. In increasing order of expire time
        _webio_sessions = {}  # type: Dict[str, Session]  # session_id -> session
        _connections = {}  # type: Dict[str, WSHandler]  # session_id -> WSHandler

        def open(self):
            logger.debug("WebSocket opened")
            cls = type(self)
            self.session_id = self.get_query_argument('session', None)
            if self.session_id in ('NEW', None):  # initial request: create a new Session
                session_info = get_session_info_from_headers(self.request.headers)
                session_info['user_ip'] = self.request.remote_ip
                session_info['request'] = self.request
                session_info['backend'] = 'tornado'
                session_info['protocol'] = 'websocket'
                application = self.get_app()
                self.session_id = random_str(24)
                cls._connections[self.session_id] = self
                # Coroutine/generator apps get a coroutine-based session;
                # plain functions run in a thread-based session.
                if iscoroutinefunction(application) or isgeneratorfunction(application):
                    self.session = CoroutineBasedSession(
                        application, session_info=session_info,
                        on_task_command=partial(self.send_msg_to_client, session_id=self.session_id),
                        on_session_close=partial(self.close_from_session, session_id=self.session_id))
                else:
                    self.session = ThreadBasedSession(
                        application, session_info=session_info,
                        on_task_command=partial(self.send_msg_to_client, session_id=self.session_id),
                        on_session_close=partial(self.close_from_session, session_id=self.session_id),
                        loop=asyncio.get_event_loop())
                cls._webio_sessions[self.session_id] = self.session
                if reconnect_timeout:
                    # Tell the client its session id so it can reconnect later.
                    self.write_message(json.dumps(dict(command='set_session_id', spec=self.session_id)))
            elif self.session_id not in cls._webio_sessions:  # WebIOSession deleted
                self.write_message(json.dumps(dict(command='close_session')))
            else:
                # Reconnection to an existing session: cancel its pending
                # expiry and replay queued commands.
                self.session = cls._webio_sessions[self.session_id]
                cls._session_expire.pop(self.session_id, None)
                cls._connections[self.session_id] = self
                cls.send_msg_to_client(self.session, self.session_id)
            logger.debug('session id: %s' % self.session_id)

        def on_message(self, message):
            # Binary frames carry serialized events (e.g. file uploads).
            if isinstance(message, bytes):
                event = deserialize_binary_event(message)
            else:
                event = json.loads(message)
            if event is None:
                return
            self.session.send_client_event(event)

        def on_close(self):
            cls = type(self)
            cls._connections.pop(self.session_id, None)
            if not reconnect_timeout and not self._close_from_session:
                # No reconnect support: the session dies with the connection.
                self.session.close(nonblock=True)
            elif reconnect_timeout:
                if self._close_from_session:
                    cls._webio_sessions.pop(self.session_id, None)
                elif self.session:
                    # Keep the session alive for a possible reconnection.
                    cls._session_expire[self.session_id] = time.time() + reconnect_timeout
            logger.debug("WebSocket closed")

    return WSHandler
def webio_handler(applications, cdn=True, reconnect_timeout=0, allowed_origins=None, check_origin=None):
    """Get the ``RequestHandler`` class for running PyWebIO applications in Tornado.
    The ``RequestHandler`` communicates with the browser by WebSocket protocol.
    The arguments of ``webio_handler()`` have the same meaning as for :func:`pywebio.platform.tornado.start_server`
    """
    applications = make_applications(applications)
    for target in applications.values():
        register_session_implement_for_target(target)

    cdn = cdn_validation(cdn, 'error')  # if CDN is not available, raise error

    if check_origin is None:
        check_origin_func = partial(_check_origin, allowed_origins=allowed_origins or [])
    else:
        def check_origin_func(origin, handler):
            # A caller-supplied checker is OR-ed with the same-site rule.
            return _is_same_site(origin, handler) or check_origin(origin)

    return _webio_handler(applications=applications, cdn=cdn, check_origin_func=check_origin_func,
                          reconnect_timeout=reconnect_timeout)
async def open_webbrowser_on_server_started(host, port):
    """Wait until the server accepts connections, then open it in a browser."""
    url = 'http://%s:%s' % (host, port)
    if await wait_host_port(host, port, duration=20):
        logger.info('Try open %s in web browser' % url)
        # webbrowser.open() may block, so invoke it in thread
        threading.Thread(target=webbrowser.open, args=(url,), daemon=True).start()
    else:
        logger.error('Open %s in web browser failed.' % url)
def _setup_server(webio_handler, port=0, host='', static_dir=None, max_buffer_size=2 ** 20 * 200,
                  **tornado_app_settings):
    """Create a Tornado application around *webio_handler* and start listening.

    Returns ``(server, port)`` where *port* is the actually bound port
    (auto-selected when the requested port is 0).
    """
    if port == 0:
        port = get_free_port()

    routes = [(r"/", webio_handler)]
    if static_dir is not None:
        routes.append((r"/static/(.*)", StaticFileHandler, {"path": static_dir}))
    # Fallback route serving PyWebIO's bundled front-end assets.
    routes.append((r"/(.*)", StaticFileHandler, {"path": STATIC_PATH, 'default_filename': 'index.html'}))

    app = tornado.web.Application(handlers=routes, **tornado_app_settings)
    # Credit: https://stackoverflow.com/questions/19074972/content-length-too-long-when-uploading-file-using-tornado
    server = app.listen(port, address=host, max_buffer_size=max_buffer_size)
    return server, port
def start_server(applications, port=0, host='',
                 debug=False, cdn=True, static_dir=None,
                 remote_access=False,
                 reconnect_timeout=0,
                 allowed_origins=None, check_origin=None,
                 auto_open_webbrowser=False,
                 max_payload_size='200M',
                 **tornado_app_settings):
    """Start a Tornado server to provide the PyWebIO application as a web service.
    The Tornado server communicates with the browser by WebSocket protocol.
    Tornado is the default backend server for PyWebIO applications,
    and ``start_server`` can be imported directly using ``from pywebio import start_server``.

    :param list/dict/callable applications: PyWebIO application.
        Can be a task function, a list of functions, or a dictionary.
        Refer to :ref:`Advanced topic: Multiple applications in start_server() <multiple_app>` for more information.
        When the task function is a coroutine function, use :ref:`Coroutine-based session <coroutine_based_session>` implementation,
        otherwise, use thread-based session implementation.
    :param int port: The port the server listens on.
        When set to ``0``, the server will automatically select a available port.
    :param str host: The host the server listens on. ``host`` may be either an IP address or hostname.
        If it's a hostname, the server will listen on all IP addresses associated with the name.
        ``host`` may be an empty string or None to listen on all available interfaces.
    :param bool debug: Tornado Server's debug mode. If enabled, the server will automatically reload for code changes.
        See `tornado doc <https://www.tornadoweb.org/en/stable/guide/running.html#debug-mode>`_ for more detail.
    :param bool/str cdn: Whether to load front-end static resources from CDN, the default is ``True``.
        Can also use a string to directly set the url of PyWebIO static resources.
    :param str static_dir: The directory to store the application static files.
        The files in this directory can be accessed via ``http://<host>:<port>/static/files``.
        For example, if there is a ``A/B.jpg`` file in ``static_dir`` path,
        it can be accessed via ``http://<host>:<port>/static/A/B.jpg``.
    :param bool remote_access: Whether to enable remote access, when enabled,
        you can get a temporary public network access address for the current application,
        others can access your application via this address.
    :param bool auto_open_webbrowser: Whether or not auto open web browser when server is started (if the operating system allows it) .
    :param int reconnect_timeout: The client can reconnect to server within ``reconnect_timeout`` seconds after an unexpected disconnection.
        If set to 0 (default), once the client disconnects, the server session will be closed.
    :param list allowed_origins: The allowed request source list. (The current server host is always allowed)
        The source contains the protocol, domain name, and port part.
        Can use Unix shell-style wildcards:

        - ``*`` matches everything
        - ``?`` matches any single character
        - ``[seq]`` matches any character in *seq*
        - ``[!seq]`` matches any character not in *seq*

        Such as: ``https://*.example.com`` 、 ``*://*.example.com``
        For detail, see `Python Doc <https://docs.python.org/zh-tw/3/library/fnmatch.html>`_
    :param callable check_origin: The validation function for request source.
        It receives the source string (which contains protocol, host, and port parts) as parameter and
        return ``True/False`` to indicate that the server accepts/rejects the request.
        If ``check_origin`` is set, the ``allowed_origins`` parameter will be ignored.
    :param int/str max_payload_size: Max size of a websocket message which Tornado can accept.
        Messages larger than the ``max_payload_size`` (default 200MB) will not be accepted.
        ``max_payload_size`` can be a integer indicating the number of bytes, or a string ending with `K` / `M` / `G`
        (representing kilobytes, megabytes, and gigabytes, respectively).
        E.g: ``500``, ``'40K'``, ``'3M'``
    :param tornado_app_settings: Additional keyword arguments passed to the constructor of ``tornado.web.Application``.
        For details, please refer: https://www.tornadoweb.org/en/stable/web.html#tornado.web.Application.settings
    """
    set_ioloop(tornado.ioloop.IOLoop.current())  # to enable bokeh app

    cdn = cdn_validation(cdn, 'warn')  # if CDN is not available, warn user and disable CDN

    page.MAX_PAYLOAD_SIZE = max_payload_size = parse_file_size(max_payload_size)
    # PYWEBIO_DEBUG env var overrides the debug argument.
    debug = Session.debug = os.environ.get('PYWEBIO_DEBUG', debug)

    # Since some cloud server may close idle connections (such as heroku),
    # use `websocket_ping_interval` to keep the connection alive
    tornado_app_settings.setdefault('websocket_ping_interval', 30)
    tornado_app_settings.setdefault('websocket_max_message_size', max_payload_size)  # Backward compatible
    tornado_app_settings['websocket_max_message_size'] = parse_file_size(
        tornado_app_settings['websocket_max_message_size'])
    tornado_app_settings['debug'] = debug

    handler = webio_handler(applications, cdn, allowed_origins=allowed_origins, check_origin=check_origin,
                            reconnect_timeout=reconnect_timeout)
    _, port = _setup_server(webio_handler=handler, port=port, host=host, static_dir=static_dir,
                            max_buffer_size=max_payload_size, **tornado_app_settings)
    print_listen_address(host, port)

    if auto_open_webbrowser:
        tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, host or 'localhost', port)
    if remote_access:
        start_remote_access_service(local_port=port)

    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.current().start()
def start_server_in_current_thread_session():
    """Start the server for script mode, listen on a free port, and open the browser.

    The PYWEBIO_SCRIPT_MODE_PORT environment variable can set the listening
    port (which also disables auto-opening the browser); it is only used in
    testing.
    """
    websocket_conn_opened = threading.Event()
    thread = threading.current_thread()

    class SingleSessionWSHandler(_webio_handler(cdn=False)):
        # Script mode allows exactly one session/connection.
        session = None
        instance = None

        def open(self):
            self.main_session = False
            cls = type(self)
            if SingleSessionWSHandler.session is None:
                self.main_session = True
                SingleSessionWSHandler.instance = self
                self.session_id = 'main'
                cls._connections[self.session_id] = self
                session_info = get_session_info_from_headers(self.request.headers)
                session_info['user_ip'] = self.request.remote_ip
                session_info['request'] = self.request
                session_info['backend'] = 'tornado'
                session_info['protocol'] = 'websocket'
                self.session = SingleSessionWSHandler.session = ScriptModeSession(
                    thread, session_info=session_info,
                    on_task_command=partial(self.send_msg_to_client, session_id=self.session_id),
                    loop=asyncio.get_event_loop())
                websocket_conn_opened.set()
                cls._webio_sessions[self.session_id] = self.session
            else:
                # A session already exists: refuse additional connections.
                self.close()

        def on_close(self):
            if SingleSessionWSHandler.session is not None and self.main_session:
                self.session.close()
                logger.debug('ScriptModeSession closed')

    async def wait_to_stop_loop(server):
        """Shut the server down once only the current thread and daemon
        threads are still running."""
        # Number of non-daemon threads, including the current thread.
        alive_none_daemonic_thread_cnt = None
        while alive_none_daemonic_thread_cnt != 1:
            # BUG FIX: Thread.isDaemon() is deprecated since Python 3.10 and
            # removed in 3.13; use the `daemon` attribute instead.
            alive_none_daemonic_thread_cnt = sum(
                1 for t in threading.enumerate() if t.is_alive() and not t.daemon
            )
            await asyncio.sleep(1)

        # Close the WebSocket connection.
        if SingleSessionWSHandler.instance:
            SingleSessionWSHandler.instance.close()
        server.stop()
        logger.debug('Closing tornado ioloop...')
        tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task() and not t.done()]
        for task in tasks:
            task.cancel()

        # The `await asyncio.sleep` is required: without yielding control the
        # `task.cancel()` calls above never get scheduled.
        await asyncio.sleep(0)
        tornado.ioloop.IOLoop.current().stop()

    def server_thread():
        """Run a fresh event loop with the Tornado server in this thread."""
        from tornado.log import access_log, app_log, gen_log
        access_log.setLevel(logging.ERROR)
        app_log.setLevel(logging.ERROR)
        gen_log.setLevel(logging.ERROR)

        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        set_ioloop(tornado.ioloop.IOLoop.current())  # to enable bokeh app

        port = 0
        if os.environ.get("PYWEBIO_SCRIPT_MODE_PORT"):
            port = int(os.environ.get("PYWEBIO_SCRIPT_MODE_PORT"))

        server, port = _setup_server(webio_handler=SingleSessionWSHandler, port=port, host='localhost',
                                     websocket_max_message_size=parse_file_size('200M'))
        tornado.ioloop.IOLoop.current().spawn_callback(partial(wait_to_stop_loop, server=server))
        if "PYWEBIO_SCRIPT_MODE_PORT" not in os.environ:
            tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, 'localhost', port)

        tornado.ioloop.IOLoop.current().start()
        logger.debug('Tornado server exit')

    t = threading.Thread(target=server_thread, name='Tornado-server')
    t.start()
    # Block the caller until the browser has connected.
    websocket_conn_opened.wait()
|
main.py | #code:utf8
import torch
import os
from losses.main import get_loss
import numpy as np
from tqdm import tqdm
from torch import nn
import math
import val
import models.bn_inception as network
import models.embed as embed
import torchvision.models as model
from sample_data.sample_data import Preprocess
import threading
import warnings
warnings.filterwarnings('ignore')
import sklearn.metrics.pairwise
import sklearn.cluster
import sklearn.metrics.cluster
# Best recall@1 observed so far, and the report line that achieved it;
# updated by save_log().
max_prec=0
max_history=0
def run(cfg):
    """Train a metric-learning embedding network as configured by *cfg*.

    Builds the backbone, attaches an embedding layer, then optimizes a metric
    loss (optionally combined with a softmax loss), evaluating and
    checkpointing every ``cfg.SHOW_PER_ITER`` iterations via ``save_log``.
    """
    print(cfg.NET)
    net = None
    if cfg.NET=="bn_inception_v2":
        net = network.bn_inception(pretrained = True)
    if cfg.NET=="densenet201":
        net = model.densenet201(pretrained = True)
    if net is None:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError("Unsupported cfg.NET: %r" % (cfg.NET,))
    embed.embed(net, sz_embedding=cfg.EMBEDDING_WIDTH,normalize_output = True, net_id = cfg.NET)
    if cfg.USE_CUDA==1:
        net.cuda()
    metric_loss = get_loss(n_input=cfg.N, k=cfg.K, tau=cfg.TAU,n_pos=cfg.POS_SAMPLE_NUM, margin=cfg.MARGIN,input_dim=cfg.EMBEDDING_WIDTH,output_dim=cfg.TRAIN_CLASS,batch_size=cfg.BATCH_SIZE,method=cfg.METHOD).cuda()
    softmax_loss = get_loss(input_dim=cfg.EMBEDDING_WIDTH,output_dim=cfg.TRAIN_CLASS,margin=cfg.SOFTMAX_MARGIN,method=cfg.SOFTMAX_METHOD).cuda()
    # Per-group learning rates: embedding layer, softmax loss, backbone
    # (excluding the embedding layer), and metric loss.
    optimizer = torch.optim.Adam(
        [
            { # embedding parameters
                'params': net.embedding_layer.parameters(),
                'lr' : cfg.EMBD_LR
            },
            { # softmax loss parameters
                'params': softmax_loss.parameters(),
                'lr': cfg.SOFTMAX_LOSS_LR
            },
            { # architecture parameters, excluding embedding layer
                'params': list(
                    set(
                        net.parameters()
                    ).difference(
                        set(net.embedding_layer.parameters())
                    )
                ),
                'lr' : cfg.NET_LR
            },
            { # metric loss parameters
                'params': metric_loss.parameters(),
                'lr': cfg.METRIC_LOSS_LR
            },
        ],
        eps = 1e-2,
        weight_decay = cfg.WEIGHT_DECAY
    )
    model_name=cfg.MODEL_PATH+str(cfg.MARGIN)+str(cfg.EMBEDDING_WIDTH)+str(cfg.METRIC_LOSS_PARAM)+'x'+str(cfg.METHOD)+str(cfg.CE_LOSS_PARAM)+'x'+str(cfg.SOFTMAX_METHOD)+str(cfg.K)+str(cfg.POS_SAMPLE_NUM)+".pkl"
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, cfg.SCHEDULER_STEP, gamma = cfg.GAMMA) #[10,30,70]
    print("metric_rate:",cfg.METRIC_LOSS_PARAM,"softmax_rate:",cfg.CE_LOSS_PARAM)
    if cfg.TRAINING_OLD==1:
        print("Load model params")
        net.load_state_dict(torch.load(model_name+"62.95"))
    preprocess = Preprocess(root=cfg.DATA_ROOT,use_cuda=cfg.USE_CUDA,train_batch_size=cfg.BATCH_SIZE,test_batch_size=cfg.TEST_BATCH_SIZE,method=cfg.METHOD,dataset_name=cfg.DATASET,with_bounding_box=cfg.WITH_BOUNDING_BOX,download=cfg.DOWNLOAD,n_pos=cfg.POS_SAMPLE_NUM,N=cfg.N)
    # Cosine distance for methods 0/8; euclidean otherwise.
    if cfg.METHOD==0 or cfg.METHOD==8:
        metric = 'cosine'
    else:
        metric = 'euclidean'
    print("embd_size=",cfg.EMBEDDING_WIDTH,"dataset=",cfg.DATASET,"batch_size=",cfg.BATCH_SIZE,"GPU ID=",cfg.GPU_NUM)
    print("EMBD_LR=",cfg.EMBD_LR,"SOFTMAX_LOSS_LR=",cfg.SOFTMAX_LOSS_LR,"NET_LR=",cfg.NET_LR)
    if cfg.METHOD==0:
        print("tau=",cfg.TAU,"K=",cfg.K,"N=",cfg.N,"N+=",cfg.POS_SAMPLE_NUM,cfg.BATCH_SIZE,'margin=',cfg.MARGIN)
    run_num=0
    sparsity=0
    err_pos=0
    old_err_pos=0
    total_sparsity=0
    total_err_pos=0
    totalEpochLoss=0
    flag=0
    for epoch in range(cfg.EPOCH):
        #train
        scheduler.step()
        train_loader=iter(preprocess.train_loader)
        for _ in tqdm(range(len(preprocess.train_loader))):
            if run_num%cfg.SHOW_PER_ITER==cfg.SHOW_PER_ITER-1:
                X1=0
                T1=0
                X1, T1, _=val.inference(net,preprocess.test_loader)
                if cfg.MULTI_THREAD == 1:
                    # Evaluate/checkpoint in a background thread.
                    # TODO(review): this branch passes total_err_pos while the
                    # synchronous branch passes total_sparsity — confirm which
                    # is intended.
                    t = threading.Thread(target=save_log,args=(X1,T1,metric,run_num,totalEpochLoss,total_err_pos,net,model_name))
                    # BUG FIX: Thread.setDaemon() is deprecated (removed in
                    # Python 3.13); assign the `daemon` attribute instead.
                    t.daemon = True
                    t.start()
                else:
                    save_log(X1,T1,metric,run_num,totalEpochLoss,total_sparsity,net,model_name)
                totalEpochLoss=0
            # BUG FIX: `iterator.next()` is Python-2-only; use the next() builtin.
            data_ = next(train_loader)
            batch_img, real_y, img_name = data_
            optimizer.zero_grad()
            out=net(batch_img.cuda())
            loss_metric,err_pos,sparsity=metric_loss(out,real_y.cuda())
            loss=0#cfg.METRIC_LOSS_PARAM*loss_metric#+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
            loss+=loss_metric
            total_err_pos=err_pos
            totalEpochLoss=totalEpochLoss+loss.data/cfg.SHOW_PER_ITER
            # Skip the parameter update when the loss has gone NaN.
            if math.isnan(loss.data)==False:
                loss.backward()
                optimizer.step()
            else:
                print(loss.data)
            run_num+=1
        print("\r\nEpoch:",epoch,"tau:",cfg.TAU)
def save_log(X1,T1,metric,run_num,totalEpochLoss,total_err_pos,net,model_name):
    """Evaluate embeddings, print precision/recall@K, and checkpoint the model.

    NOTE(review): relies on the module-level name ``cfg`` that is only bound
    inside the ``__main__`` block — confirm before importing this module
    elsewhere.
    """
    global max_prec
    global max_history
    print(max_history)
    print(model_name)
    preck_test,recallk_test=val.test(X1, T1, cfg.TEST_K,metric)
    nmi,mAP,PR,ROC,F1 = 0,0,0,0,0
    print("iter:",run_num,"loss:",totalEpochLoss)
    # LaTeX-style table row of precision@K / recall@K for the test split.
    prec_recall_test="&"+str('%.2f'%preck_test[0])+"&"+str('%.2f'%preck_test[1])+"&"+str('%.2f'%preck_test[2])+"&"+str('%.2f'%preck_test[3])+"&"+str('%.2f'%recallk_test[1])+"&"+str('%.2f'%recallk_test[2])+"&"+str('%.2f'%recallk_test[3])+"\\\\"
    print(prec_recall_test)
    # NOTE(review): the model is saved unconditionally here and saved again
    # under the same filename below when a new best recall@1 is reached.
    torch.save(net.state_dict(),model_name+str('%.2f'%recallk_test[0]))
    if recallk_test[0]>max_prec:
        # New best recall@1: remember the score and its report line.
        max_prec=recallk_test[0]
        max_history = prec_recall_test
        torch.save(net.state_dict(),model_name+str('%.2f'%recallk_test[0]))
    #nmi,mAP,PR,ROC,F1 = val.test_some_scores(X1, T1,metric,cfg.TEST_CLASS)
    #show_result = "&"+str('%.2f'%preck_test[0])+"&"+str('%.2f'%preck_test[1])+"&"+str('%.2f'%preck_test[2])+"&"+str('%.2f'%preck_test[3])+"&"+str('%.2f'%recallk_test[1])+"&"+str('%.2f'%recallk_test[2])+"&"+str('%.2f'%recallk_test[3])+"&"+str('%.2f'%nmi)+"&"+str('%.2f'%mAP)+"&"+str('%.2f'%F1)+"\\\\"
    #print(show_result)
if __name__ == '__main__':
    # Select the dataset config to train on (CUB / CARS196 / Online Products).
    import config.config_CUB200_2011 as cfg
    #import config.config_CARS196 as cfg
    #import config.config_ONLINE_PRODUCT as cfg
    # Restrict CUDA to the configured GPU(s) before any CUDA initialization.
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.GPU_NUM
    run(cfg)
|
testing.py | import random
import os
import sys
import gi
gi.require_version('Gtk', '3.0')
from twisted.internet import gtk3reactor
import threading
gtk3reactor.install()
from gi.repository import GLib, Gtk, GObject
import time
from twisted.internet import reactor
from base import SignalObject
class BaseDevice(SignalObject):
    """Base device exposing a boolean GObject "active" signal."""
    # Emitted with the device's active state.
    active = GObject.Signal("active", arg_types=(bool,))
class Detector(BaseDevice):
    """Device that additionally reports an integer activity level."""
    # Emitted with an integer activity reading.
    activity = GObject.Signal("activity", arg_types=(int,))
if __name__ == '__main__':
    def show_active(obj, value):
        """Print "active" signal emissions."""
        print("active", obj, value)

    def show_activity(obj, value):
        """Print "activity" signal emissions."""
        print("activity", obj, value)

    def run():
        """Emit demo signals once a second for ~50s, then stop the reactor."""
        dev = Detector()
        dev.active.connect(show_active)
        dev.activity.connect(show_activity)
        count = 0
        while count < 50:
            dev.transmit('active', True)
            dev.transmit('activity', random.randint(0, 100))
            time.sleep(1)
            count += 1
        reactor.stop()

    # BUG FIX: Thread.setDaemon() is deprecated (removed in Python 3.13);
    # pass daemon=True so the worker dies with the main thread.
    worker_thread = threading.Thread(target=run, daemon=True)
    reactor.callLater(0, worker_thread.start)
    reactor.run()
|
cache.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the QueryCache functionality of johnny."""
import django
from django.conf import settings
from django.db import connection
# `django.db.connections` only exists from Django 1.2 onward; fall back to
# None on older versions.
try:
    from django.db import connections
# BUG FIX: narrowed from a bare `except:` (which also swallowed
# KeyboardInterrupt/SystemExit) to the error this fallback is meant for.
except ImportError:
    connections = None
from johnny import middleware
from johnny import settings as johnny_settings
import base
# Python 2.4 compatibility: `any` became a builtin only in Python 2.5.
try:
    any
except NameError:
    def any(iterable):
        # Return True as soon as one truthy element is seen.
        for i in iterable:
            if i: return True
        return False
# put tests in here to be included in the testing suite
__all__ = ['MultiDbTest', 'SingleModelTest', 'MultiModelTest', 'TransactionSupportTest', 'BlackListTest', 'TransactionManagerTestCase']
def _pre_setup(self):
    """Enable johnny's queryset cache for a test and install its middleware.

    The previous value of DISABLE_QUERYSET_CACHE is stashed on the test case
    so _post_teardown() can restore it.
    """
    self.saved_DISABLE_SETTING = getattr(johnny_settings, 'DISABLE_QUERYSET_CACHE', False)
    johnny_settings.DISABLE_QUERYSET_CACHE = False
    self.middleware = middleware.QueryCacheMiddleware()
def _post_teardown(self):
    """Undo _pre_setup(): unpatch the middleware and restore the kill switch."""
    self.middleware.unpatch()
    johnny_settings.DISABLE_QUERYSET_CACHE = self.saved_DISABLE_SETTING
class QueryCacheBase(base.JohnnyTestCase):
    """Test-case base that installs johnny's query cache around each test."""
    def _pre_setup(self):
        _pre_setup(self)
        super(QueryCacheBase, self)._pre_setup()

    def _post_teardown(self):
        _post_teardown(self)
        super(QueryCacheBase, self)._post_teardown()
class TransactionQueryCacheBase(base.TransactionJohnnyTestCase):
    """Transaction-enabled variant of QueryCacheBase."""
    def _pre_setup(self):
        _pre_setup(self)
        super(TransactionQueryCacheBase, self)._pre_setup()

    def _post_teardown(self):
        from django.db import transaction
        _post_teardown(self)
        super(TransactionQueryCacheBase, self)._post_teardown()
        # Leave no dangling managed transaction behind a failed test.
        # NOTE(review): transaction.is_managed()/managed() only exist in
        # Django < 1.6 — confirm the supported Django versions.
        if transaction.is_managed():
            transaction.managed(False)
class BlackListTest(QueryCacheBase):
    """Tables on johnny's BLACKLIST must bypass the query cache."""
    fixtures = base.johnny_fixtures

    def test_basic_blacklist(self):
        from johnny import cache, settings
        from testapp.models import Genre, Book
        # q yields one hit/miss flag per executed query.
        q = base.message_queue()
        old = johnny_settings.BLACKLIST
        johnny_settings.BLACKLIST = set(['testapp_genre'])
        connection.queries = []
        Book.objects.get(id=1)
        Book.objects.get(id=1)
        # Book is not blacklisted: first query misses, second hits the cache.
        self.failUnless((False, True) == (q.get_nowait(), q.get_nowait()))
        list(Genre.objects.all())
        list(Genre.objects.all())
        # Genre is blacklisted, so neither query may be a cache hit.
        self.failUnless(not any((q.get_nowait(), q.get_nowait())))
        # Restore the global blacklist for other tests.
        johnny_settings.BLACKLIST = old
class MultiDbTest(TransactionQueryCacheBase):
    """Query-cache behaviour across multiple configured databases."""
    multi_db = True
    # One genres fixture per database alias.
    fixtures = ['genres.json', 'genres.second.json']
    def _run_threaded(self, query, queue):
        """Runs a query (as a string) from testapp in another thread and
        puts (hit?, result) on the provided queue."""
        from threading import Thread
        def _inner(_query):
            from testapp.models import Genre, Book, Publisher, Person
            from johnny.signals import qc_hit, qc_miss, qc_skip
            from johnny.cache import local
            from django.db import transaction
            msg = []
            def hit(*args, **kwargs):
                msg.append(True)
            def miss(*args, **kwargs):
                msg.append(False)
            def skip(*args, **kwargs):
                msg.append(False)
            # Record whether the evaluated query hit, missed, or skipped
            # the cache, then append the query result itself.
            qc_hit.connect(hit)
            qc_miss.connect(miss)
            qc_skip.connect(skip)
            obj = eval(_query)
            msg.append(obj)
            queue.put(msg)
        # Run in a separate thread so the query escapes this thread's
        # transaction-local cache state; join to keep the test sequential.
        t = Thread(target=_inner, args=(query,))
        t.start()
        t.join()
def _other(self, cmd, q):
def _innter(cmd):
q.put(eval(cmd))
t = Thread(target=_inner, args=(cmd,))
t.start()
t.join()
    def test_basic_queries(self):
        """Tests basic queries and that the cache is working for multiple db's"""
        if len(getattr(settings, "DATABASES", [])) <= 1:
            print "\n Skipping multi database tests"
            return
        from pprint import pformat
        from testapp.models import Genre, Book, Publisher, Person
        from django.db import connections
        self.failUnless("default" in getattr(settings, "DATABASES"))
        self.failUnless("second" in getattr(settings, "DATABASES"))
        # Write a distinct title into each database so hits are attributable.
        g1 = Genre.objects.using("default").get(pk=1)
        g1.title = "A default database"
        g1.save(using='default')
        g2 = Genre.objects.using("second").get(pk=1)
        g2.title = "A second database"
        g2.save(using='second')
        for c in connections:
            connections[c].queries = []
        #fresh from cache since we saved each
        g1 = Genre.objects.using('default').get(pk=1)
        g2 = Genre.objects.using('second').get(pk=1)
        for c in connections:
            self.failUnless(len(connections[c].queries) == 1)
        self.failUnless(g1.title == "A default database")
        self.failUnless(g2.title == "A second database")
        #should be a cache hit
        g1 = Genre.objects.using('default').get(pk=1)
        g2 = Genre.objects.using('second').get(pk=1)
        # Query counts unchanged: the second round was served from cache.
        for c in connections:
            self.failUnless(len(connections[c].queries) == 1)
    def test_cache_key_setting(self):
        """Tests that two databases use a single cached object when given the same DB cache key"""
        if len(getattr(settings, "DATABASES", [])) <= 1:
            print "\n Skipping multi database tests"
            return
        from testapp.models import Genre
        from django.db import connections
        self.failUnless("default" in getattr(settings, "DATABASES"))
        self.failUnless("second" in getattr(settings, "DATABASES"))
        # Map both aliases onto the same cache key so they share cache entries.
        old_cache_keys = johnny_settings.DB_CACHE_KEYS
        johnny_settings.DB_CACHE_KEYS = {'default': 'default', 'second': 'default'}
        g1 = Genre.objects.using("default").get(pk=1)
        g1.title = "A default database"
        g1.save(using='default')
        g2 = Genre.objects.using("second").get(pk=1)
        g2.title = "A second database"
        g2.save(using='second')
        for c in connections:
            connections[c].queries = []
        #fresh from cache since we saved each
        g1 = Genre.objects.using('default').get(pk=1)
        g2 = Genre.objects.using('second').get(pk=1)
        johnny_settings.DB_CACHE_KEYS = old_cache_keys
        # With a shared cache key only one of the two reads hits the database.
        total_queries = sum([len(connections[c].queries)
                             for c in connections])
        self.assertEqual(total_queries, 1)
    def test_transactions(self):
        """Tests transaction rollbacks and local cache for multiple dbs"""
        if len(getattr(settings, "DATABASES", [])) <= 1:
            print "\n Skipping multi database tests"
            return
        # sqlite cannot be shared across threads here; skip in that case.
        if hasattr(settings, 'DATABASE_ENGINE'):
            if settings.DATABASE_ENGINE == 'sqlite3':
                print "\n Skipping test requiring multiple threads."
                return
        else:
            from django.db import connections, transaction
            for db in settings.DATABASES.values():
                if db['ENGINE'] == 'sqlite3':
                    print "\n Skipping test requiring multiple threads."
                    return
            for conname in connections:
                con = connections[conname]
                if not base.supports_transactions(con):
                    print "\n Skipping test requiring transactions."
                    return
        from django.db import connections, transaction
        from johnny import cache as c
        from Queue import Queue as queue
        q = queue()
        # Run a query string in another thread; it reports (hit?, result).
        other = lambda x: self._run_threaded(x, q)
        from testapp.models import Genre
        # sanity check
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        self.failUnless("default" in getattr(settings, "DATABASES"))
        self.failUnless("second" in getattr(settings, "DATABASES"))
        # this should seed this fetch in the global cache
        g1 = Genre.objects.using("default").get(pk=1)
        g2 = Genre.objects.using("second").get(pk=1)
        start_g1 = g1.title
        transaction.enter_transaction_management(using='default')
        transaction.managed(using='default')
        transaction.enter_transaction_management(using='second')
        transaction.managed(using='second')
        g1.title = "Testing a rollback"
        g2.title = "Testing a commit"
        g1.save()
        g2.save()
        # test outside of transaction, should be cache hit and
        # not contain the local changes
        # NOTE(review): the 2-tuple unpack assumes exactly one hit/miss
        # signal fired for the threaded query — confirm.
        other("Genre.objects.using('default').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == start_g1)
        self.failUnless(hit)
        transaction.rollback(using='default')
        transaction.commit(using='second')
        transaction.managed(False, "default")
        transaction.managed(False, "second")
        #other thread should have seen rollback
        other("Genre.objects.using('default').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == start_g1)
        self.failUnless(hit)
        connections['default'].queries = []
        connections['second'].queries = []
        #should be a cache hit due to rollback
        g1 = Genre.objects.using("default").get(pk=1)
        #should be a db hit due to commit
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(connections['default'].queries == [])
        self.failUnless(len(connections['second'].queries) == 1)
        #other thread should now be accessing the cache after the get
        #from the commit.
        other("Genre.objects.using('second').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == g2.title)
        self.failUnless(hit)
        self.failUnless(g1.title == start_g1)
        self.failUnless(g2.title == "Testing a commit")
        transaction.leave_transaction_management("default")
        transaction.leave_transaction_management("second")
    def test_savepoints(self):
        """tests savepoints for multiple db's"""
        from Queue import Queue as queue
        q = queue()
        # Run a query string in another thread; it reports (hit?, result).
        other = lambda x: self._run_threaded(x, q)
        from testapp.models import Genre
        try:
            from django.db import connections, transaction
        except ImportError:
            # connections doesn't exist in 1.1 and under
            # NOTE(review): no `return` here — on ImportError execution falls
            # through to the DATABASES length check below; confirm intended.
            print"\n Skipping multi database tests"
        if len(getattr(settings, "DATABASES", [])) <= 1:
            print "\n Skipping multi database tests"
            return
        for name, db in settings.DATABASES.items():
            if name in ('default', 'second'):
                if 'sqlite' in db['ENGINE']:
                    print "\n Skipping test requiring multiple threads."
                    return
                con = connections[name]
                if not con.features.uses_savepoints:
                    print "\n Skipping test requiring savepoints."
                    return
        # sanity check
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        self.failUnless("default" in getattr(settings, "DATABASES"))
        self.failUnless("second" in getattr(settings, "DATABASES"))
        g1 = Genre.objects.using("default").get(pk=1)
        start_g1 = g1.title
        g2 = Genre.objects.using("second").get(pk=1)
        transaction.enter_transaction_management(using='default')
        transaction.managed(using='default')
        transaction.enter_transaction_management(using='second')
        transaction.managed(using='second')
        g1.title = "Rollback savepoint"
        g1.save()
        g2.title = "Committed savepoint"
        g2.save(using="second")
        sid2 = transaction.savepoint(using="second")
        sid = transaction.savepoint(using="default")
        g1.title = "Dirty text"
        g1.save()
        #other thread should see the original key and cache object from memcache,
        #not the local cache version
        other("Genre.objects.using('default').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title == start_g1)
        #should not be a hit due to rollback
        connections["default"].queries = []
        transaction.savepoint_rollback(sid, using="default")
        g1 = Genre.objects.using("default").get(pk=1)
        # i think it should be "Rollback Savepoint" here
        self.failUnless(g1.title == start_g1)
        #will be pushed to dirty in commit
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(g2.title == "Committed savepoint")
        transaction.savepoint_commit(sid2, using="second")
        #other thread should still see original version even
        #after savepoint commit
        other("Genre.objects.using('second').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title == start_g1)
        connections["second"].queries = []
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(connections["second"].queries == [])
        transaction.commit(using="second")
        transaction.managed(False, "second")
        g2 = Genre.objects.using("second").get(pk=1)
        self.failUnless(connections["second"].queries == [])
        self.failUnless(g2.title == "Committed savepoint")
        #now committed and cached, other thread should reflect new title
        #without a hit to the db
        other("Genre.objects.using('second').get(pk=1)")
        hit, ostart = q.get()
        self.failUnless(ostart.title == g2.title)
        self.failUnless(hit)
        transaction.managed(False, "default")
        transaction.leave_transaction_management("default")
        transaction.leave_transaction_management("second")
class SingleModelTest(QueryCacheBase):
    """Query-cache behaviour for queries that touch a single table."""
    fixtures = base.johnny_fixtures

    def test_multi_where_cache_coherency(self):
        """A test to detect the issue described in bitbucket #24:
        https://bitbucket.org/jmoiron/johnny-cache/issue/24/
        """
        from testapp.models import Issue24Model as i24m
        i24m.objects.get_or_create(one=1, two=1)
        i24m.objects.get_or_create(one=1, two=2)
        i24m.objects.get_or_create(one=2, two=1)
        i24m.objects.get_or_create(one=2, two=2)
        ones = i24m.objects.filter(one=1)
        twos = i24m.objects.filter(two=1)
        res = i24m.objects.filter(one__in=ones).exclude(two=twos).all()
        # XXX: original author's note — the intent of this scenario is unclear
        # and the assertion from issue #24 fails; something similar to the
        # patch in #24 was included. A proper test case is still needed here.

    def test_exists_hit(self):
        """Tests that an exist failure caches properly"""
        from testapp.models import Publisher
        connection.queries = []
        Publisher.objects.filter(title="Doesn't Exist").exists()
        Publisher.objects.filter(title="Doesn't Exist").exists()
        # the second .exists() must be served from cache -> only one db query
        self.assertEqual(len(connection.queries), 1)

    def test_basic_querycaching(self):
        """A basic test that querycaching is functioning properly and is
        being invalidated properly on singular table reads & writes."""
        from testapp.models import Publisher, Genre
        from django.db.models import Q
        connection.queries = []
        starting_count = Publisher.objects.count()
        starting_count = Publisher.objects.count()
        # make sure that doing this twice doesn't hit the db twice
        self.failUnless(len(connection.queries) == 1)
        self.failUnless(starting_count == 1)
        # this write should invalidate the key we have
        Publisher(title='Harper Collins', slug='harper-collins').save()
        connection.queries = []
        new_count = Publisher.objects.count()
        self.failUnless(len(connection.queries) == 1)
        self.failUnless(new_count == 2)
        # this tests the codepath after 'except EmptyResultSet' where
        # result_type == MULTI
        self.failUnless(not list(Publisher.objects.filter(title__in=[])))
        # test for a regression on the WhereNode, bitbucket #20
        g1 = Genre.objects.get(pk=1)
        g1.title = "Survival Horror"
        g1.save()
        g1 = Genre.objects.get(Q(title__iexact="Survival Horror"))

    def test_querycache_return_results(self):
        """Test that the return results from the query cache are what we
        expect; single items are single items, etc."""
        from testapp.models import Publisher
        connection.queries = []
        pub = Publisher.objects.get(id=1)
        pub2 = Publisher.objects.get(id=1)
        self.failUnless(pub == pub2)
        self.failUnless(len(connection.queries) == 1)
        pubs = list(Publisher.objects.all())
        pubs2 = list(Publisher.objects.all())
        self.failUnless(pubs == pubs2)
        self.failUnless(len(connection.queries) == 2)

    def test_delete(self):
        """Test that a database delete clears a table cache."""
        from testapp.models import Genre
        g1 = Genre.objects.get(pk=1)
        begin = Genre.objects.all().count()
        g1.delete()
        self.assertRaises(Genre.DoesNotExist, lambda: Genre.objects.get(pk=1))
        connection.queries = []
        self.failUnless(Genre.objects.all().count() == (begin -1))
        self.failUnless(len(connection.queries) == 1)
        Genre(title='Science Fiction', slug='scifi').save()
        Genre(title='Fantasy', slug='rubbish').save()
        Genre(title='Science Fact', slug='scifact').save()
        count = Genre.objects.count()
        Genre.objects.get(title='Fantasy')
        q = base.message_queue()
        Genre.objects.filter(title__startswith='Science').delete()
        # this should not be cached
        Genre.objects.get(title='Fantasy')
        self.failUnless(not q.get_nowait())

    def test_update(self):
        # .update() must invalidate the table cache for Genre
        from testapp.models import Genre
        connection.queries = []
        g1 = Genre.objects.get(pk=1)
        Genre.objects.all().update(title="foo")
        g2 = Genre.objects.get(pk=1)
        self.failUnless(g1.title != g2.title)
        self.failUnless(g2.title == "foo")
        self.failUnless(len(connection.queries) == 3)

    def test_empty_count(self):
        """Test for an empty count aggregate query with an IN"""
        from testapp.models import Genre
        books = Genre.objects.filter(id__in=[])
        count = books.count()
        self.failUnless(count == 0)

    def test_aggregate_annotation(self):
        """Test aggregating an annotation """
        from django.db.models import Count
        from django.db.models import Sum
        from testapp.models import Book
        from django.core.paginator import Paginator
        author_count = Book.objects.annotate(author_count=Count('authors')).aggregate(Sum('author_count'))
        self.assertEquals(author_count['author_count__sum'],2)
        # also test using the paginator, although this shouldn't be a big issue..
        books = Book.objects.all().annotate(num_authors=Count('authors'))
        paginator = Paginator(books, 25)
        list_page = paginator.page(1)

    def test_queryset_laziness(self):
        """This test exists to model the laziness of our queries; the
        QuerySet cache should not alter the laziness of QuerySets."""
        from testapp.models import Genre
        connection.queries = []
        qs = Genre.objects.filter(title__startswith='A')
        qs = qs.filter(pk__lte=1)
        qs = qs.order_by('pk')
        # we should only execute the query at this point
        arch = qs[0]
        self.failUnless(len(connection.queries) == 1)

    def test_order_by(self):
        """A basic test that our query caching is taking order clauses
        into account."""
        from testapp.models import Genre
        connection.queries = []
        first = list(Genre.objects.filter(title__startswith='A').order_by('slug'))
        second = list(Genre.objects.filter(title__startswith='A').order_by('-slug'))
        # test that we've indeed done two queries and that the orders
        # of the results are reversed
        self.failUnless((first[0], first[1] == second[1], second[0]))
        self.failUnless(len(connection.queries) == 2)

    def test_signals(self):
        """Test that the signals we say we're sending are being sent."""
        from testapp.models import Genre
        from johnny.signals import qc_hit, qc_miss, qc_skip
        connection.queries = []
        misses = []
        hits = []
        def qc_hit_listener(sender, **kwargs):
            hits.append(kwargs['key'])
        def qc_miss_listener(*args, **kwargs):
            misses.append(kwargs['key'])
        qc_hit.connect(qc_hit_listener)
        qc_miss.connect(qc_miss_listener)
        # skips are counted as misses for the purpose of this test
        qc_skip.connect(qc_miss_listener)
        first = list(Genre.objects.filter(title__startswith='A').order_by('slug'))
        second = list(Genre.objects.filter(title__startswith='A').order_by('slug'))
        self.failUnless(len(misses) == len(hits) == 1)

    def test_in_values_list(self):
        # regression: tables referenced by a values_list subquery must be
        # tracked as dependencies of the outer query
        from testapp.models import Publisher, Book
        from johnny.cache import get_tables_for_query
        pubs = Publisher.objects.all()
        books = Book.objects.filter(publisher__in=pubs.values_list("id", flat=True))
        tables = list(sorted(get_tables_for_query(books.query)))
        self.assertEqual(["testapp_book", "testapp_publisher"], tables)
class MultiModelTest(QueryCacheBase):
    """Query-cache behaviour for queries spanning multiple tables."""
    fixtures = base.johnny_fixtures

    def test_foreign_keys(self):
        """Test that simple joining (and deferred loading) functions as we'd
        expect when involving multiple tables.  In particular, a query that
        joins 2 tables should invalidate when either table is invalidated."""
        from testapp.models import Genre, Book, Publisher, Person
        connection.queries = []
        books = list(Book.objects.select_related('publisher'))
        books = list(Book.objects.select_related('publisher'))
        str(books[0].genre)
        # this should all have done one query..
        self.failUnless(len(connection.queries) == 1)
        books = list(Book.objects.select_related('publisher'))
        # invalidate the genre key, which shouldn't impact the query
        Genre(title='Science Fiction', slug='scifi').save()
        after_save = len(connection.queries)
        books = list(Book.objects.select_related('publisher'))
        self.failUnless(len(connection.queries) == after_save)
        # now invalidate publisher, which _should_
        p = Publisher(title='McGraw Hill', slug='mcgraw-hill')
        p.save()
        after_save = len(connection.queries)
        books = list(Book.objects.select_related('publisher'))
        self.failUnless(len(connection.queries) == after_save + 1)
        # the query should be cached again...
        books = list(Book.objects.select_related('publisher'))
        # this time, create a book and the query should again be uncached..
        Book(title='Anna Karenina', slug='anna-karenina', publisher=p).save()
        after_save = len(connection.queries)
        books = list(Book.objects.select_related('publisher'))
        self.failUnless(len(connection.queries) == after_save + 1)

    def test_invalidate(self):
        """Test for the module-level invalidation function."""
        from Queue import Queue as queue
        from testapp.models import Book, Genre, Publisher
        from johnny.cache import invalidate
        q = base.message_queue()
        b = Book.objects.get(id=1)
        invalidate(Book)
        b = Book.objects.get(id=1)
        first, second = q.get_nowait(), q.get_nowait()
        # both reads missed the cache: invalidate() wiped the Book key
        self.failUnless(first == second == False)
        g = Genre.objects.get(id=1)
        p = Publisher.objects.get(id=1)
        # invalidate() accepts both raw table names and model classes
        invalidate('testapp_genre', Publisher)
        g = Genre.objects.get(id=1)
        p = Publisher.objects.get(id=1)
        fg,fp,sg,sp = [q.get() for i in range(4)]
        self.failUnless(fg == fp == sg == sp == False)

    def test_many_to_many(self):
        # m2m add/remove/clear must invalidate the through table but not
        # the related model tables themselves
        from testapp.models import Book, Person
        b = Book.objects.get(pk=1)
        p1 = Person.objects.get(pk=1)
        p2 = Person.objects.get(pk=2)
        b.authors.add(p1)
        connection.queries = []
        list(b.authors.all())
        #many to many should be invalidated
        self.failUnless(len(connection.queries) == 1)
        b.authors.remove(p1)
        b = Book.objects.get(pk=1)
        list(b.authors.all())
        #can't determine the queries here, 1.1 and 1.2 uses them differently
        connection.queries = []
        #many to many should be invalidated,
        #person is not invalidated since we just want
        #the many to many table to be
        p1 = Person.objects.get(pk=1)
        self.failUnless(len(connection.queries) == 0)
        p1.books.add(b)
        connection.queries = []
        #many to many should be invalidated,
        #this is the first query
        list(p1.books.all())
        b = Book.objects.get(pk=1)
        self.failUnless(len(connection.queries) == 1)
        #query should be cached
        self.failUnless(len(list(p1.books.all())) == 1)
        self.failUnless(len(connection.queries) == 1)
        #testing clear
        b.authors.clear()
        self.failUnless(b.authors.all().count() == 0)
        self.failUnless(p1.books.all().count() == 0)
        b.authors.add(p1)
        self.failUnless(b.authors.all().count() == 1)
        queries = len(connection.queries)
        #should be cached
        b.authors.all().count()
        self.failUnless(len(connection.queries) == queries)
        self.failUnless(p1.books.all().count() == 1)
        p1.books.clear()
        self.failUnless(b.authors.all().count() == 0)

    def test_subselect_support(self):
        """Test that subselects are handled properly."""
        from django import db
        db.reset_queries()
        from testapp.models import Book, Person, PersonType
        author_types = PersonType.objects.filter(title='Author')
        author_people = Person.objects.filter(person_types__in=author_types)
        written_books = Book.objects.filter(authors__in=author_people)
        q = base.message_queue()
        # nothing has executed yet: querysets are lazy
        self.failUnless(len(db.connection.queries) == 0)
        count = written_books.count()
        self.failUnless(q.get() == False)
        # execute the query again, this time it's cached
        self.failUnless(written_books.count() == count)
        self.failUnless(q.get() == True)
        # change the person type of 'Author' to something else
        pt = PersonType.objects.get(title='Author')
        pt.title = 'NonAuthor'
        pt.save()
        self.failUnless(PersonType.objects.filter(title='Author').count() == 0)
        q.clear()
        db.reset_queries()
        # now execute the same query; the result should be diff and it should be
        # a cache miss
        new_count = written_books.count()
        self.failUnless(new_count != count)
        self.failUnless(q.get() == False)
        PersonType.objects.filter(title='NonAuthor').order_by('-title')[:5]

    def test_foreign_key_delete_cascade(self):
        """From #32, test that if you have 'Foo' and 'Bar', with bar.foo => Foo,
        and you delete foo, bar.foo is also deleted, which means you have to
        invalidate Bar when deletions are made in Foo (but not changes)."""
        # TODO(review): test body was never written; docstring only.
class TransactionSupportTest(TransactionQueryCacheBase):
    """Exercises johnny-cache's interaction with manual transaction management."""
    fixtures = base.johnny_fixtures

    def _run_threaded(self, query, queue):
        """Runs a query (as a string) from testapp in another thread and
        puts (hit?, result) on the provided queue."""
        from threading import Thread
        def _inner(_query):
            from testapp.models import Genre, Book, Publisher, Person
            from johnny.signals import qc_hit, qc_miss, qc_skip
            msg = []
            def hit(*args, **kwargs):
                msg.append(True)
            def miss(*args, **kwargs):
                msg.append(False)
            qc_hit.connect(hit)
            qc_miss.connect(miss)
            qc_skip.connect(miss)
            # NOTE: eval() runs the caller-supplied query string; test-only code.
            obj = eval(_query)
            msg.append(obj)
            queue.put(msg)
            if connections is not None:
                #this is to fix a race condition with the
                #thread to ensure that we close it before
                #the next test runs
                connections['default'].close()
        t = Thread(target=_inner, args=(query,))
        t.start()
        t.join()

    def tearDown(self):
        # leave no dangling transaction state for the next test
        from django.db import transaction
        if transaction.is_managed():
            if transaction.is_dirty():
                transaction.rollback()
            transaction.managed(False)
            transaction.leave_transaction_management()

    def test_transaction_commit(self):
        """Test transaction support in Johnny."""
        from Queue import Queue as queue
        from django.db import transaction
        from testapp.models import Genre, Publisher
        from johnny import cache
        if django.VERSION[:2] < (1, 3):
            if settings.DATABASE_ENGINE == 'sqlite3':
                print "\n Skipping test requiring multiple threads."
                return
        else:
            if settings.DATABASES.get('default', {}).get('ENGINE', '').endswith('sqlite3'):
                print "\n Skipping test requiring multiple threads."
                return
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        connection.queries = []
        cache.local.clear()
        q = queue()
        other = lambda x: self._run_threaded(x, q)
        # load some data
        start = Genre.objects.get(id=1)
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        # these should be the same and should have hit cache
        self.failUnless(hit)
        self.failUnless(ostart == start)
        # enter manual transaction management
        transaction.enter_transaction_management()
        transaction.managed()
        start.title = 'Jackie Chan Novels'
        # local invalidation, this key should hit the localstore!
        nowlen = len(cache.local)
        start.save()
        self.failUnless(nowlen != len(cache.local))
        # perform a read OUTSIDE this transaction... it should still see the
        # old gen key, and should still find the "old" data
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title != start.title)
        transaction.commit()
        # now that we commit, we push the localstore keys out; this should be
        # a cache miss, because we never read it inside the previous transaction
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(not hit)
        self.failUnless(ostart.title == start.title)
        transaction.managed(False)
        transaction.leave_transaction_management()

    def test_transaction_rollback(self):
        """Tests johnny's handling of transaction rollbacks.

        Similar to the commit, this sets up a write to a db in a transaction,
        reads from it (to force a cache write of sometime), then rolls back."""
        from Queue import Queue as queue
        from django.db import transaction
        from testapp.models import Genre, Publisher
        from johnny import cache
        if django.VERSION[:2] < (1, 3):
            if settings.DATABASE_ENGINE == 'sqlite3':
                print "\n Skipping test requiring multiple threads."
                return
        else:
            if settings.DATABASES.get('default', {}).get('ENGINE', '').endswith('sqlite3'):
                print "\n Skipping test requiring multiple threads."
                return
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        connection.queries = []
        cache.local.clear()
        q = queue()
        other = lambda x: self._run_threaded(x, q)
        # load some data
        start = Genre.objects.get(id=1)
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        # these should be the same and should have hit cache
        self.failUnless(hit)
        self.failUnless(ostart == start)
        # enter manual transaction management
        transaction.enter_transaction_management()
        transaction.managed()
        start.title = 'Jackie Chan Novels'
        # local invalidation, this key should hit the localstore!
        nowlen = len(cache.local)
        start.save()
        self.failUnless(nowlen != len(cache.local))
        # perform a read OUTSIDE this transaction... it should still see the
        # old gen key, and should still find the "old" data
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(hit)
        self.failUnless(ostart.title != start.title)
        # perform a READ inside the transaction; this should hit the localstore
        # but not the outside!
        nowlen = len(cache.local)
        start2 = Genre.objects.get(id=1)
        self.failUnless(start2.title == start.title)
        self.failUnless(len(cache.local) > nowlen)
        transaction.rollback()
        # we rollback, and flush all johnny keys related to this transaction
        # subsequent gets should STILL hit the cache in the other thread
        # and indeed, in this thread.
        self.failUnless(transaction.is_dirty() == False)
        other('Genre.objects.get(id=1)')
        hit, ostart = q.get()
        self.failUnless(hit)
        start = Genre.objects.get(id=1)
        self.failUnless(ostart.title == start.title)
        transaction.managed(False)
        transaction.leave_transaction_management()

    def test_savepoint_rollback(self):
        """Tests rollbacks of savepoints"""
        from django.db import transaction
        from testapp.models import Genre, Publisher
        from johnny import cache
        if not connection.features.uses_savepoints:
            return
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        connection.queries = []
        cache.local.clear()
        transaction.enter_transaction_management()
        transaction.managed()
        g = Genre.objects.get(pk=1)
        start_title = g.title
        g.title = "Adventures in Savepoint World"
        g.save()
        g = Genre.objects.get(pk=1)
        self.failUnless(g.title == "Adventures in Savepoint World")
        sid = transaction.savepoint()
        g.title = "In the Void"
        g.save()
        g = Genre.objects.get(pk=1)
        self.failUnless(g.title == "In the Void")
        transaction.savepoint_rollback(sid)
        # back to the pre-savepoint write
        g = Genre.objects.get(pk=1)
        self.failUnless(g.title == "Adventures in Savepoint World")
        transaction.rollback()
        # full rollback restores the fixture title
        g = Genre.objects.get(pk=1)
        self.failUnless(g.title == start_title)
        transaction.managed(False)
        transaction.leave_transaction_management()

    def test_savepoint_commit(self):
        """Tests a transaction commit (release)
        The release actually pushes the savepoint back into the dirty stack,
        but at the point it was saved in the transaction"""
        from django.db import transaction
        from testapp.models import Genre, Publisher
        from johnny import cache
        if not connection.features.uses_savepoints:
            return
        self.failUnless(transaction.is_managed() == False)
        self.failUnless(transaction.is_dirty() == False)
        connection.queries = []
        cache.local.clear()
        transaction.enter_transaction_management()
        transaction.managed()
        g = Genre.objects.get(pk=1)
        start_title = g.title
        g.title = "Adventures in Savepoint World"
        g.save()
        g = Genre.objects.get(pk=1)
        self.failUnless(g.title == "Adventures in Savepoint World")
        sid = transaction.savepoint()
        g.title = "In the Void"
        g.save()
        connection.queries = []
        #should be a database hit because of save in savepoint
        g = Genre.objects.get(pk=1)
        self.failUnless(len(connection.queries) == 1)
        self.failUnless(g.title == "In the Void")
        transaction.savepoint_commit(sid)
        #should be a cache hit against the dirty store
        connection.queries = []
        g = Genre.objects.get(pk=1)
        self.failUnless(connection.queries == [])
        self.failUnless(g.title == "In the Void")
        transaction.commit()
        #should have been pushed up to cache store
        g = Genre.objects.get(pk=1)
        self.failUnless(connection.queries == [])
        self.failUnless(g.title == "In the Void")
        transaction.managed(False)
        transaction.leave_transaction_management()
import johnny
class TransactionManagerTestCase(base.TransactionJohnnyTestCase):
    """Directly exercises the cache backend's savepoint bookkeeping."""

    def setUp(self):
        # instantiating the middleware installs johnny's query-cache patches
        self.middleware = middleware.QueryCacheMiddleware()

    def tearDown(self):
        from django.db import transaction
        if transaction.is_managed():
            transaction.managed(False)

    def test_savepoint_localstore_flush(self):
        """
        This is a very simple test to see if savepoints will actually
        be committed, i.e. flushed out from localstore into cache.
        """
        from django.db import transaction
        transaction.enter_transaction_management()
        transaction.managed()
        TABLE_NAME = 'test_table'
        cache_backend = johnny.cache.get_backend()
        cache_backend.patch()
        keyhandler = cache_backend.keyhandler
        keygen = keyhandler.keygen
        tm = cache_backend.cache_backend
        # First, we set one key-val pair generated for our non-existing table.
        table_key = keygen.gen_table_key(TABLE_NAME)
        tm.set(table_key, 'val1')
        # Then we create a savepoint.
        # The key-value pair is moved into 'trans_sids' item of localstore.
        tm._create_savepoint('savepoint1')
        # We then commit all the savepoints (i.e. only one in this case)
        # The items stored in 'trans_sids' should be moved back to the
        # top-level dictionary of our localstore
        tm._commit_all_savepoints()
        # And this checks if it actually happened.
        self.failUnless(table_key in tm.local)
|
wukong.py | # -*- coding: utf-8-*-
import random
import threading
from snowboy import snowboydecoder
from robot import config, utils, constants, logging, statistic, Player
from robot.Updater import Updater
from robot.ConfigMonitor import ConfigMonitor
from robot.Conversation import Conversation
from server import server
from watchdog.observers import Observer
from robot import KeyHandler
import sys
import os
import signal
import hashlib
import fire
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger(__name__)
class Wukong(object):
    """Main entry object: wires together wake-word detection, the
    conversation engine and the web console."""

    _profiling = False
    _dev = False
    # Spoken acknowledgements when the robot is woken (kept verbatim).
    wakes = [
        '在的,Sir',
        '您说',
        '怎么了',
        'Hello',
        'Hi']

    def init(self):
        """One-time initialisation: config, conversation engine, and a
        watchdog observer so config/data edits take effect without restart."""
        self.detector = None
        self._interrupted = False
        print('''
********************************************************
* jarvis- 中文语音对话机器人 *
********************************************************
如需退出,可以按 Ctrl-4 组合键。
''')
        config.init()
        self._conversation = Conversation(self._profiling)
        self._conversation.say_call_back = server.onSay
        self._observer = Observer()
        event_handler = ConfigMonitor(self._conversation)
        self._observer.schedule(event_handler, constants.CONFIG_PATH, False)
        self._observer.schedule(event_handler, constants.DATA_PATH, False)
        self._observer.start()

    def _signal_handler(self, sig, frame):
        """SIGINT handler: flag interruption and stop the config watcher."""
        self._interrupted = True
        utils.clean()
        self._observer.stop()

    # Voice wake-up callback
    def _detected_callback(self):
        if not utils.is_proper_time():
            logger.warning('勿扰模式开启中')
            return
        if self._conversation.isRecording:
            logger.warning('正在录音中,跳过')
            return
        server.onSay("{\"action_info\": \"wake\",\"msg\": \"唤醒\"}")
        # self._conversation.say(random.choice(self.wakes))
        Player.play(constants.getData('bee_wake.mp3'))
        logger.info('开始录音')
        self._conversation.interrupt()
        self._conversation.isRecording = True

    # Recording-finished callback: hand the audio file to the conversation
    def _recored_callback(self, fp):
        server.onSay("{\"action_info\": \"think\",\"msg\": \"思考\"}")
        logger.info('结束录音 开始思考')
        Player.play(constants.getData('bee_complte.mp3'))
        self._conversation.converse(fp, self._end_think)

    # Thinking-finished callback
    def _end_think(self):
        server.onSay("{\"action_info\": \"stop_think\",\"msg\": \"思考结束\"}")
        logger.info("结束思考")

    def wake(self):
        """Manually trigger the wake state (bypasses the hotword)."""
        self.detector.active_now()
        logger.info("手动唤醒!!")

    def _do_not_bother_on_callback(self):
        """Hotword callback: enable do-not-disturb if the switch is on."""
        if config.get('/do_not_bother/hotword_switch', False):
            utils.do_not_bother = True
            Player.play(constants.getData('off.wav'))
            logger.info('勿扰模式打开')

    def _do_not_bother_off_callback(self):
        """Hotword callback: disable do-not-disturb if the switch is on."""
        if config.get('/do_not_bother/hotword_switch', False):
            utils.do_not_bother = False
            Player.play(constants.getData('on.wav'))
            logger.info('勿扰模式关闭')

    def fuck(self):
        # placeholder hotword callback; only logs
        logger.info("????? v")

    def _interrupt_callback(self):
        """Polled by the detector loop; True once SIGINT was received."""
        return self._interrupted

    # Spoken confirmation once startup completes
    def say_allcomplete(self):
        self._conversation.say('{} 已经启动'.format(config.get('first_name', 'jarvis')), True)

    def run(self):
        self.init()
        # capture SIGINT signal, e.g., Ctrl+C
        signal.signal(signal.SIGINT, self._signal_handler)
        # web console
        server.run(self._conversation, self, self.detector)
        # statistic.report(0)
        t = threading.Thread(target=self.openBrawer)
        t.start()
        # BUG FIX: pass the bound method itself, not the result of calling it;
        # `onCompleted=self.say_allcomplete()` spoke immediately and handed
        # None to Player as the completion callback.
        Player.play(constants.getData('robot_open.mp3'), onCompleted=self.say_allcomplete, volum=0.7)
        KeyHandler.start()
        try:
            self.initDetector()
        except AttributeError:
            logger.error('初始化离线唤醒功能失败')

    def initDetector(self):
        """(Re)create the snowboy hotword detector and enter its blocking loop."""
        if self.detector is not None:
            self.detector.terminate()
        models = [
            constants.getHotwordModel(config.get('hotword', 'wukong.pmdl')),
            constants.getHotwordModel(utils.get_do_not_bother_on_hotword()),
            constants.getHotwordModel(utils.get_do_not_bother_off_hotword())
        ]
        for item in models:
            logger.info(item)
        self.detector = snowboydecoder.HotwordDetector(models, sensitivity=config.get('sensitivity', 0.5))
        # main loop
        try:
            self.detector.start(detected_callback=[self._detected_callback,
                                                   self._do_not_bother_on_callback,
                                                   self._do_not_bother_off_callback,
                                                   self.fuck],
                                audio_recorder_callback=self._recored_callback,
                                interrupt_check=self._interrupt_callback,
                                silent_count_threshold=config.get('silent_threshold', 15),
                                recording_timeout=config.get('recording_timeout', 5) * 4,
                                sleep_time=0.03)
            self.detector.terminate()
        except Exception as e:
            logger.critical('离线唤醒机制初始化失败:{}'.format(e))

    def md5(self, password):
        """Return the hex MD5 digest of *password* (UTF-8 encoded)."""
        return hashlib.md5(password.encode('utf-8')).hexdigest()

    def update(self):
        """Pull and apply the latest release; returns the updater result."""
        updater = Updater()
        return updater.update()

    def fetch(self):
        """Fetch the latest release without applying it."""
        updater = Updater()
        updater.fetch()

    def restart(self):
        """Re-exec the current process with the same arguments."""
        logger.critical('程序重启...')
        try:
            self.detector.terminate()
        except AttributeError:
            pass
        python = sys.executable
        os.execl(python, python, *sys.argv)

    def profiling(self):
        """CLI entry: run with profiling enabled."""
        logger.info('性能调优')
        self._profiling = True
        self.run()

    def dev(self):
        """CLI entry: run against the test environment."""
        logger.info('使用测试环境')
        self._dev = True
        self.run()

    def openBrawer(self):
        # Browser launch disabled; kept as a hook for kiosk mode.
        pass
        # os.system("chromium-browser --disable-popup-blocking --no-first-run --disable-desktop-notifications --kiosk \"http://localhost:5000/magic\"")
|
cloudevents-receiver.py | #!/usr/bin/env python3
# copy to /usr/local/bin/cloudevents-receiver and use with cloudevents.service
import blinkt
import colorsys
import json
import random
import threading
import time
from flask import Flask, request
from cloudevents.http import from_http
app = Flask(__name__)

# Shared animation-control primitives.
stop = threading.Event()   # signals the running animation thread to exit
lock = threading.Lock()    # only one animation may drive the strip at a time
# Placeholder thread so status reporting works before any animation starts.
# BUG FIX: `target` must be a callable or None — the original `target=()`
# would raise TypeError if this default thread were ever started.
activeThread = threading.Thread(name="default", target=None, args=(lock, stop,))

# Whitelisted action names accepted from incoming CloudEvents.
actions = [
    'blink',
    'brighten',
    'clear',
    'darken',
    'rainbow',
    'status'
]

# Strip brightness in [0.1, 1.0]; adjusted by brighten()/darken().
brightness = 0.1
blinkt.set_brightness(brightness)
blinkt.set_clear_on_exit()
blinkt.clear()
blinkt.show()
@app.route("/", methods=["POST"])
def home():
    """CloudEvents HTTP endpoint: dispatch whitelisted blinky actions.

    Returns a JSON status on success, 501 for unknown actions and 400 for
    event types other than dev.pulsifer.blinky.request.
    """
    event = from_http(request.headers, request.get_data())
    if event['type'] == 'dev.pulsifer.blinky.request':
        action = event.data['action']
        if action in actions:
            global activeThread
            if action == 'blink':
                stop_running_thread()
                activeThread = threading.Thread(name="blink", target=blink, args=(lock,stop,))
                activeThread.start()
            elif action == 'rainbow':
                stop_running_thread()
                activeThread = threading.Thread(name="rainbow", target=rainbow, args=(lock,stop,))
                activeThread.start()
            else:
                # Explicit dispatch table instead of eval(): even with the
                # whitelist check, eval on request data is an injection hazard.
                {'brighten': brighten, 'darken': darken,
                 'clear': clear, 'status': status}[action]()
            return json.dumps({
                'action': activeThread.name,        # getName() is deprecated
                'alive': activeThread.is_alive(),
                'brightness': brightness,
            })
        return "", 501
    return "", 400
def stop_running_thread():
    """Signal the active animation thread to exit and wait for it to finish."""
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if activeThread.is_alive():
        stop.set()
        activeThread.join()
        stop.clear()
def brighten():
    """Step the global brightness up by 0.1, clamped to 1, and apply it."""
    global brightness
    if brightness < 1:
        brightness += 0.1
    brightness = min(brightness, 1)
    blinkt.set_brightness(brightness)
    blinkt.show()
def darken():
    """Step the global brightness down by 0.1, clamped to 0.1, and apply it."""
    global brightness
    if brightness > 0.1:
        brightness -= 0.1
    brightness = max(brightness, 0.1)
    blinkt.set_brightness(brightness)
    blinkt.show()
def clear():
    """Stop any running animation thread, then blank the LED strip."""
    stop_running_thread()
    blinkt.clear()
    blinkt.show()
def status():
    # Intentional no-op: the POST handler's JSON response already reports
    # the active thread name, liveness and brightness.
    pass
def blink(lock, stop):
    """Fill the strip with random colours every 100 ms until *stop* is set,
    then blank the pixels. Holds *lock* for the whole animation."""
    with lock:
        while not stop.is_set():
            for idx in range(blinkt.NUM_PIXELS):
                # three randint calls per pixel, in r/g/b order
                rgb = tuple(random.randint(0, 255) for _ in range(3))
                blinkt.set_pixel(idx, *rgb)
            blinkt.show()
            time.sleep(0.1)
        blinkt.clear()
def rainbow(lock, stop):
    """Sweep a time-driven hue gradient across the strip until *stop* is set,
    then blank the pixels. Holds *lock* for the whole animation."""
    step = 360.0 / 16.0
    with lock:
        while not stop.is_set():
            base_hue = int(time.time() * 100) % 360
            for px in range(blinkt.NUM_PIXELS):
                h = ((base_hue + px * step) % 360) / 360.0
                r, g, b = (int(c * 255) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0))
                blinkt.set_pixel(px, r, g, b)
            blinkt.show()
            time.sleep(0.001)
        blinkt.clear()
if __name__ == "__main__":
    # Bind to all interfaces so other LAN hosts can deliver CloudEvents.
    app.run(port=3000, host="0.0.0.0")
|
servidor_web.py | #!/usr/bin/env python3
# Authors:
# Brito Segura Angel
# Tovar Herrera Carlos Eduardo
# Zazueta Barajas Sebastián Pedro
# License: MIT
# Version 1.4
# Date: 07/12/2021
# Description: Servidor web para Control de Invernadero
# Paquetes para crear el servidor web
import os
import sys
import json
import magic
from http.server import BaseHTTPRequestHandler, HTTPServer
# Control del Invernadero (funciones del sistema embebido)
from invernadero import *
# Manejo de hilos para procesos concurrentes
from threading import Thread
# Importación de la librería de control del GPIO de la Raspberry Pi
#import RPi.GPIO as GPIO #Descomentar para una implementación física
# Hostname or IP address on which the web server listens.
# address = "localhost"  # local testing
address = "192.168.1.254"  # IP assigned in the controller board's network config
# Port on which the server handles HTTP requests.
# A production web server would default to 80.
port = 8080
# Main UI page served at the root path.
archivoHTML = "inicio.html"
# Subclass BaseHTTPRequestHandler to implement a simple web server.
class WebServer(BaseHTTPRequestHandler):
    """HTTP request handler for the greenhouse ("invernadero") control UI."""

    def _serve_file(self, rel_path):
        """Serve an arbitrary file found under the server directory."""
        # Reject path-traversal attempts before touching the filesystem:
        # self.path comes straight from the client.
        if os.path.isabs(rel_path) or ".." in os.path.normpath(rel_path).split(os.sep):
            self.send_error(403)
            return
        if not os.path.isfile(rel_path):
            self.send_error(404)  # requested file does not exist
            return
        self.send_response(200)
        # Every response must declare its content type in the header.
        # BUG FIX: the original `if rel_path.find(".css"):` is truthy for -1
        # (substring not found), so almost every file was labelled text/css.
        if rel_path.endswith(".css"):
            self.send_header("Content-type", "text/css")
            self.end_headers()
        else:
            # Provide the file's mime type, detected from its contents.
            mime = magic.Magic(mime=True)
            self.send_header("Content-type", mime.from_file(rel_path))
            self.end_headers()
        with open(rel_path, 'rb') as file:
            self.wfile.write(file.read())  # stream the file to the socket

    def _parse_post(self, json_obj):
        """Validate the client's JSON command and dispatch the matching action."""
        if not 'action' in json_obj or not 'value' in json_obj:
            print("Datos JSON incorrectos")
            return
        funciones = {
            'irrigacion' : irrigacion,           # irrigation system
            'temperatura': temperatura,          # temperature control
            'radiador' : radiador,               # incandescent-lamp power control
            'ventilador' : ventilador,           # DC-motor power control
            'programacion': programaInvernadero  # scheduled temperature/irrigation cycles
        }
        accion = funciones.get(json_obj['action'])
        if accion:
            accion(json_obj['value'])

    def _serve_ui_file(self):
        """Serve the main user-interface page (archivoHTML)."""
        if not os.path.isfile(archivoHTML):
            err = archivoHTML + " no encontrado"
            self.wfile.write(bytes(err, "utf-8"))  # surface the error on the page
            print(err)
            return
        try:
            with open(archivoHTML, "r") as f:
                # BUG FIX: the original "\n".join(f.readlines()) doubled every
                # newline, since readlines() already keeps the terminators.
                content = f.read()
        except Exception:
            content = "Error leyendo " + archivoHTML
        self.wfile.write(bytes(content, "utf-8"))  # return HTML as bytes

    def do_GET(self):
        """Handle all GET requests (HTML pages and static assets).

        For safety, no query variables arriving by this method are parsed."""
        if self.path == '/':
            # Root path: respond with the default interface (inicio.html).
            self.send_response(200)
            # The HTTP header must always carry the payload's mime type.
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self._serve_ui_file()
        else:
            # Otherwise, verify the file exists and serve it.
            self._serve_file(self.path[1:])

    def do_POST(self):
        """Handle POST requests: JSON commands for the Raspberry Pi.

        The body is parsed defensively so malformed input cannot inject code;
        commands arrive as asynchronous JSON calls with no response body."""
        # Guard against a missing Content-Length header (int(None) raised).
        content_length = int(self.headers.get('Content-Length') or 0)
        if content_length < 1:
            # Nothing was received; return to the main program flow.
            return
        post_data = self.rfile.read(content_length)
        try:
            # Interpret the received bytes as UTF-8 JSON -> Python dict.
            jobj = json.loads(post_data.decode("utf-8"))
            self._parse_post(jobj)  # act on the client's command
        except Exception:
            # Discard malformed payloads, logging the failure.
            print(sys.exc_info())
            print("Datos POST no reconocidos")
# Start the web server
def server():
    """Create the HTTPServer instance and serve requests until interrupted."""
    # New HTTPServer instance bound to the configured address/port, using
    # the WebServer request handler defined above.
    web_server = HTTPServer((address, port), WebServer)
    print("Servidor iniciado")
    print ("\tAtendiendo solicitudes en http://{}:{}".format(address, port))
    try:
        # Keep the web server handling requests in the background
        web_server.serve_forever()
    except KeyboardInterrupt:
        pass  # CTRL+C requests a clean shutdown
    except:
        # Report any other error raised while serving
        print(sys.exc_info())
    # Stop the web server, closing all open connections
    web_server.server_close()
    # Report the web server stop on the console
    print("\nServidor detenido.")
# Supervises the embedded system's worker processes
def main():
    """Launch the web server and the control/cycle threads."""
    try:
        hilo1 = Thread(target=server)
        hilo2 = Thread(target=iniciaControl) # Comment out on a physical deployment
        hilo3 = Thread(target=ciclosTempIrr)
        hilo1.start()
        hilo2.start() # Comment out on a physical deployment
        hilo3.start()
        """ Descomentar si es una implementación física
        # Control de sensores a través de hilos
        sensor1 = Thread(target=leerTemperatura)
        sensor1.start()
        sensor2 = Thread(target=registrarHumedad)
        sensor2.start()
        """
    except:
        print("Terminando programa")
        # Reset all GPIO ports to their default state (inputs)
        #GPIO.cleanup() # Uncomment on a physical deployment
# Entry point of the main program
if __name__ == "__main__":
    main()
|
python_layers.py | import sys
sys.path.insert(0, '../../python/')
import caffe
import random
import numpy as np
from threading import Thread
import pdb
from python_utils import *
class sortDataRead(object):
    """Cyclic batch reader over a fixed integer data matrix.

    Each call pulls the next `batch_size` rows (wrapping around at the end of
    the data), one-hot encodes them, and stores into `thread_result`:
      - 'rand_mat': one-hot of the raw rows, shape (batch, seq, max_value)
      - 'label_mat': one-hot of the sorted rows shifted right one step
        (teacher-forcing decoder input; first step all zeros)
      - 'train_label_mat': argsort indices of each row (decoder targets)
    """

    def __init__(self, data, batch_size, max_value, thread_result):
        self.data = data  # (n, len_sequence) integer matrix, values in [0, max_value)
        self.n = data.shape[0]
        self.len_sequence = self.data.shape[1]
        self.iteration = 0  # index of the next row to read
        self.thread_result = thread_result
        self.batch_size = batch_size
        self.max_value = max_value

    def __call__(self):
        if self.iteration + self.batch_size >= self.n:
            # Wrap around: take the tail of the data, then refill from the head.
            # BUGFIX: the original sliced `data[:n - iteration]` for the wrapped
            # part and reset `iteration = n - iteration`, which only matches the
            # destination shape when the batch splits exactly in half; use the
            # actual number of wrapped rows instead.
            remaining = self.n - self.iteration
            wrapped = self.batch_size - remaining
            rand_mat = np.zeros((self.batch_size, self.len_sequence), dtype=self.data.dtype)
            rand_mat[:remaining, :] = self.data[self.iteration:self.n, :]
            rand_mat[remaining:, :] = self.data[:wrapped, :]
            self.iteration = wrapped
        else:
            rand_mat = self.data[self.iteration:self.iteration + self.batch_size, :]
            self.iteration += self.batch_size
        label_mat = np.sort(rand_mat, axis=1)
        train_label_mat = np.argsort(rand_mat, axis=1)
        rand_mat_one_hot = np.zeros((self.batch_size, self.len_sequence, self.max_value))
        label_mat_one_hot = np.zeros((self.batch_size, self.len_sequence, self.max_value))
        # Batch/position index arrays for fancy-indexed one-hot assignment
        # (np.repeat/np.tile instead of the Python-2-only `range(...) * batch`).
        a1_idx = np.repeat(np.arange(self.batch_size), self.len_sequence)
        a2_idx = np.tile(np.arange(self.len_sequence), self.batch_size)
        rand_mat_one_hot[a1_idx, a2_idx, rand_mat.astype(int).ravel()] = 1
        label_mat_one_hot[a1_idx, a2_idx, label_mat.astype(int).ravel()] = 1
        # Shift the sorted one-hot right by one step (first step is all zeros)
        label_one_hot_mat_shift = np.zeros((self.batch_size, self.len_sequence, self.max_value))
        label_one_hot_mat_shift[:, 1:, :] = label_mat_one_hot[:, :-1, :]
        self.thread_result['rand_mat'] = rand_mat_one_hot
        self.thread_result['label_mat'] = label_one_hot_mat_shift
        self.thread_result['train_label_mat'] = train_label_mat
class sortDataGeneratorOne(object):
    """Generates batches of random float sequences for a sorting task.

    Stores into `thread_result`:
      - 'rand_mat': the raw sequences, shape (batch, seq, 1)
      - 'label_mat': the sorted sequences shifted right one step, (batch, seq, 1)
      - 'train_label_mat': argsort indices of each sequence
    Every generated batch is also appended to 'train_generate_sents.txt'.
    """

    def __init__(self, len_sequence, max_value, batch_size, thread_result):
        self.len_sequence = len_sequence
        self.max_value = max_value
        self.batch_size = batch_size
        self.thread_result = thread_result
        # Truncate the log file and mark the start of a new run.
        with open('train_generate_sents.txt', 'w') as log:
            log.writelines('begin\n')

    def __call__(self):
        shape = (self.batch_size, self.len_sequence)
        samples = np.random.rand(*shape)
        #samples = np.array(samples*1000, dtype=int)
        sorted_rows = np.sort(samples, axis=1)
        order = np.argsort(samples, axis=1)
        # Teacher-forcing decoder input: sorted rows shifted right, step 0 zeroed
        shifted = np.zeros(shape)
        shifted[:, 1:] = sorted_rows[:, :-1]
        self.thread_result['rand_mat'] = samples.reshape(shape + (1,))
        self.thread_result['label_mat'] = shifted.reshape(shape + (1,))
        self.thread_result['train_label_mat'] = order
        # Append the generated sequences to the log file, one row per line
        with open('train_generate_sents.txt', 'a') as log:
            for row in np.ndarray.tolist(samples):
                log.writelines('%s\n' % (' '.join([str(m) for m in row])))
class sortDataGenerator(object):
    """Generates one-hot encoded batches of random integer sequences for sorting.

    Stores into `thread_result`:
      - 'rand_mat': one-hot of the random rows, shape (batch, seq, max_value)
      - 'label_mat': one-hot of the sorted rows shifted right one step
        (teacher-forcing decoder input; first step all zeros)
      - 'train_label_mat': argsort indices of each row (decoder targets)
    """

    def __init__(self, len_sequence, max_value, batch_size, thread_result):
        self.len_sequence = len_sequence
        self.max_value = max_value  # values are drawn from [0, max_value)
        self.batch_size = batch_size
        self.thread_result = thread_result

    def __call__(self):
        rand_mat = np.random.rand(self.batch_size, self.len_sequence)
        rand_mat = np.array(rand_mat * self.max_value, dtype=int)
        label_mat = np.sort(rand_mat, axis=1)
        train_label_mat = np.argsort(rand_mat, axis=1)
        rand_mat_one_hot = np.zeros((self.batch_size, self.len_sequence, self.max_value))
        label_mat_one_hot = np.zeros((self.batch_size, self.len_sequence, self.max_value))
        # Batch/position index arrays for fancy-indexed one-hot assignment;
        # np.repeat/np.tile replace the Python-2-only `range(...) * batch_size`
        # list arithmetic, which raises a TypeError on Python 3.
        a1_idx = np.repeat(np.arange(self.batch_size), self.len_sequence)
        a2_idx = np.tile(np.arange(self.len_sequence), self.batch_size)
        rand_mat_one_hot[a1_idx, a2_idx, rand_mat.ravel()] = 1
        label_mat_one_hot[a1_idx, a2_idx, label_mat.ravel()] = 1
        # (The original also built a one-hot of train_label_mat that was never
        # published to thread_result; that dead computation is dropped.)
        label_one_hot_mat_shift = np.zeros((self.batch_size, self.len_sequence, self.max_value))
        label_one_hot_mat_shift[:, 1:, :] = label_mat_one_hot[:, :-1, :]
        self.thread_result['rand_mat'] = rand_mat_one_hot
        self.thread_result['label_mat'] = label_one_hot_mat_shift
        self.thread_result['train_label_mat'] = train_label_mat
class caffeDataLayer(caffe.Layer):
    """Base class for prefetching Caffe Python data layers.

    A worker thread runs `self.batchAdvancer` (set by subclasses) to prepare
    the next batch into `self.thread_result` while Caffe consumes the
    current one.
    """
    def dispatch_worker(self):
        """Start the background thread that prepares the next batch."""
        assert self.thread is None
        self.thread = Thread(target=self.batchAdvancer)
        self.thread.start()
    def join_worker(self):
        """Block until the background batch preparation finishes."""
        assert self.thread is not None
        self.thread.join()
        self.thread = None
    def forward(self, bottom, top):
        """Copy the prefetched batch into the top blobs, then prefetch again."""
        # Wait for the in-flight batch before reading thread_result
        if self.thread is not None:
            self.join_worker()
        # self.top_names (set by subclass setup()) maps top order to result keys
        for top_index, name in zip(range(len(top)), self.top_names):
            top[top_index].data[...] = self.thread_result[name]
        self.dispatch_worker()
    def reshape(self, bottom, top):
        """Top shapes are fixed in setup(); nothing to do per iteration."""
        pass
    def backward(self, bottom, top):
        """Data layers have no gradient to propagate."""
        pass
class generateSortData(caffeDataLayer):
    """Data layer generating one-hot integer sorting batches on the fly.

    `param_str` must evaluate to a dict with keys 'len_sequence',
    'max_value' and 'batch_size'.
    """
    def setup(self, bottom, top):
        """Parse param_str, start the prefetch worker and size the top blobs."""
        # NOTE(review): eval() on param_str executes arbitrary code from the
        # net definition; acceptable only for trusted prototxt files.
        self.params = eval(self.param_str)
        assert 'len_sequence' in self.params.keys()
        assert 'max_value' in self.params.keys()
        assert 'batch_size' in self.params.keys()
        self.len_sequence = self.params['len_sequence']
        self.max_value = self.params['max_value']
        self.batch_size = self.params['batch_size']
        self.thread_result = {}
        self.thread = None
        self.top_names = ['rand_mat', 'label_mat', 'train_label_mat']
        self.batchAdvancer = sortDataGenerator(self.params['len_sequence'], self.params['max_value'],
            self.params['batch_size'], self.thread_result)
        # Run one batch synchronously so the first forward() has data
        self.dispatch_worker()
        self.join_worker()
        print 'Outputs:', self.top_names
        if len(top) != len(self.top_names):
            raise Exception('Incorrect number of outputs (expected %d, got %d)' %
                            (len(self.top_names), len(top)))
        for top_index, name in enumerate(self.top_names):
            if name == 'train_label_mat':
                # targets are plain index matrices, not one-hot
                #shape = (self.batch_size, self.len_sequence, 1)
                shape = (self.batch_size, self.len_sequence)
            else:
                shape = (self.batch_size, self.len_sequence, self.max_value)
            top[top_index].reshape(*shape)
class generateSortDataOne(caffeDataLayer):
    """Data layer generating raw-float sorting batches via sortDataGeneratorOne.

    `param_str` must evaluate to a dict with keys 'len_sequence',
    'max_value' and 'batch_size'.
    """
    def setup(self, bottom, top):
        """Parse param_str, start the prefetch worker and size the top blobs."""
        # NOTE(review): eval() on param_str executes arbitrary code from the
        # net definition; acceptable only for trusted prototxt files.
        self.params = eval(self.param_str)
        assert 'len_sequence' in self.params.keys()
        assert 'max_value' in self.params.keys()
        assert 'batch_size' in self.params.keys()
        self.len_sequence = self.params['len_sequence']
        self.max_value = self.params['max_value']
        self.batch_size = self.params['batch_size']
        self.thread_result = {}
        self.thread = None
        self.top_names = ['rand_mat', 'label_mat', 'train_label_mat']
        self.batchAdvancer = sortDataGeneratorOne(self.params['len_sequence'],
                                                  self.params['max_value'],
                                                  self.params['batch_size'],
                                                  self.thread_result)
        # Run one batch synchronously so the first forward() has data
        self.dispatch_worker()
        self.join_worker()
        print 'Outputs:', self.top_names
        if len(top) != len(self.top_names):
            raise Exception('Incorrect number of outputs (expected %d, got %d)' %
                            (len(self.top_names), len(top)))
        for top_index, name in enumerate(self.top_names):
            if name == 'train_label_mat':
                # targets are plain index matrices, not one-hot
                #shape = (self.batch_size, self.len_sequence, 1)
                shape = (self.batch_size, self.len_sequence)
            else:
                # raw float values, one scalar per sequence step
                shape = (self.batch_size, self.len_sequence, 1)
            top[top_index].reshape(*shape)
class readSortData(caffeDataLayer):
    """Data layer reading pre-generated sorting sequences from a text file.

    `param_str` must evaluate to a dict with keys 'len_sequence',
    'max_value' and 'batch_size'.
    """
    def setup(self, bottom, top):
        """Parse param_str, load the dataset and start the prefetch worker."""
        # NOTE(review): eval() on param_str executes arbitrary code from the
        # net definition; acceptable only for trusted prototxt files.
        self.params = eval(self.param_str)
        assert 'len_sequence' in self.params.keys()
        assert 'max_value' in self.params.keys()
        assert 'batch_size' in self.params.keys()
        self.len_sequence = self.params['len_sequence']
        self.max_value = self.params['max_value']
        self.batch_size = self.params['batch_size']
        # dataset file is keyed by sequence length and value range
        data_txt = 'utils/data/ls_%d_mv_%d_train.txt' %(self.len_sequence, self.max_value)
        self.data = read_data(data_txt)  # helper imported from python_utils
        self.thread_result = {}
        self.thread = None
        self.top_names = ['rand_mat', 'label_mat', 'train_label_mat']
        self.batchAdvancer = sortDataRead(self.data, self.params['batch_size'],
                                          self.params['max_value'], self.thread_result)
        # Run one batch synchronously so the first forward() has data
        self.dispatch_worker()
        self.join_worker()
        print 'Outputs:', self.top_names
        if len(top) != len(self.top_names):
            raise Exception('Incorrect number of outputs (expected %d, got %d)' %
                            (len(self.top_names), len(top)))
        for top_index, name in enumerate(self.top_names):
            if name == 'train_label_mat':
                # targets are plain index matrices, not one-hot
                #shape = (self.batch_size, self.len_sequence, 1)
                shape = (self.batch_size, self.len_sequence)
            else:
                shape = (self.batch_size, self.len_sequence, self.max_value)
            top[top_index].reshape(*shape)
|
PlexConnect.py | #!/usr/bin/env python
"""
PlexConnect
Sources:
inter-process-communication (queue): http://pymotw.com/2/multiprocessing/communication.html
"""
import sys, time
from os import sep
import socket
from multiprocessing import Process, Pipe
from multiprocessing.managers import BaseManager
import signal, errno
from Version import __VERSION__
import DNSServer, WebServer
import Settings, ATVSettings
from PILBackgrounds import isPILinstalled
from Debug import * # dprint()
def getIP_self():
    """Return the IP address PlexConnect should advertise.

    Either autodetects the machine's outgoing IP via a throw-away UDP socket
    or uses the manual override from settings.cfg.
    """
    cfg = param['CSettings']
    if cfg.getSetting('enable_plexconnect_autodetect')=='True':
        # get public ip of machine running PlexConnect
        # (connecting a UDP socket sends no packets; it just selects the
        # local interface that would route to the target address)
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(('1.2.3.4', 1000))
            IP = s.getsockname()[0]
        finally:
            s.close()  # BUGFIX: the socket was previously never closed (leak)
        dprint('PlexConnect', 0, "IP_self: "+IP)
    else:
        # manual override from "settings.cfg"
        IP = cfg.getSetting('ip_plexconnect')
        dprint('PlexConnect', 0, "IP_self (from settings): "+IP)
    return IP
# initializer for Manager, proxy-ing ATVSettings to WebServer/XMLConverter
def initProxy():
    """Manager-process initializer: ignore SIGINT so CTRL+C is handled
    only by the main process, which then stops the manager cleanly."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
# module-level state shared by startup()/run()/shutdown()
procs = {}  # child-process name -> multiprocessing.Process
pipes = {}  # child-process name -> parent end of its control Pipe
param = {}  # shared configuration handed to every child process
running = False  # True while all enabled services are up
def _startProcess(name, target):
    """Spawn *target* as a child process and register it under *name*.

    Creates the control pipe, starts the process and gives it 0.1s to crash
    on startup. Returns True when the child is still alive afterwards,
    False (after logging the failure) otherwise.
    """
    master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-child process
    proc = Process(target=target, args=(slave, param))
    proc.start()
    time.sleep(0.1)
    if not proc.is_alive():
        dprint('PlexConnect', 0, name + " not alive. Shutting down.")
        return False
    procs[name] = proc
    pipes[name] = master
    return True

def startup():
    """Initialize settings/logging and launch the DNS and web server processes.

    Returns True when every enabled service came up; on failure the already
    started services are shut down again before returning False.
    """
    global procs
    global pipes
    global param
    global running

    # Settings
    cfg = Settings.CSettings()
    param['CSettings'] = cfg

    # Logfile
    if cfg.getSetting('logpath').startswith('.'):
        # relative to current path
        logpath = sys.path[0] + sep + cfg.getSetting('logpath')
    else:
        # absolute path
        logpath = cfg.getSetting('logpath')
    param['LogFile'] = logpath + sep + 'PlexConnect.log'
    param['LogLevel'] = cfg.getSetting('loglevel')
    dinit('PlexConnect', param, True)  # init logging, new file, main process

    dprint('PlexConnect', 0, "Version: {0}", __VERSION__)
    dprint('PlexConnect', 0, "Python: {0}", sys.version)
    dprint('PlexConnect', 0, "Host OS: {0}", sys.platform)
    dprint('PlexConnect', 0, "PILBackgrounds: Is PIL installed? {0}", isPILinstalled())

    # more Settings
    param['IP_self'] = getIP_self()
    param['HostToIntercept'] = cfg.getSetting('hosttointercept')
    param['baseURL'] = 'http://'+ param['HostToIntercept']

    # proxy for ATVSettings
    proxy = BaseManager()
    proxy.register('ATVSettings', ATVSettings.CATVSettings)
    proxy.start(initProxy)
    param['CATVSettings'] = proxy.ATVSettings()

    running = True

    # Launch DNSServer (optional), WebServer and WebServer_SSL (optional);
    # stop at the first service that fails to come up. The repeated
    # pipe/process/is_alive stanza is factored into _startProcess().
    if cfg.getSetting('enable_dnsserver')=='True':
        running = _startProcess('DNSServer', DNSServer.Run)
    if running:
        running = _startProcess('WebServer', WebServer.Run)
    if running and \
       cfg.getSetting('enable_webserver_ssl')=='True':
        running = _startProcess('WebServer_SSL', WebServer.Run_SSL)

    # not started successful - clean up
    if not running:
        cmdShutdown()
        shutdown()

    return running
def run(timeout=60):
    """Idle for *timeout* seconds; return whether the services should keep running."""
    try:
        time.sleep(timeout)  # the main process just waits between checks
    except IOError as e:
        # A signal-interrupted sleep during shutdown raises
        # "IOError: [Errno 4] Interrupted function call" - mask only that case.
        if e.errno != errno.EINTR or running:
            raise
    return running
def shutdown():
    """Wait for every child process to exit, then persist the ATV settings."""
    # each child exits after receiving 'shutdown' over its control pipe
    for proc in procs.values():
        proc.join()
    param['CATVSettings'].saveSettings()
    dprint('PlexConnect', 0, "shutdown")
def cmdShutdown():
    """Flag the main loop to stop and ask every child process to shut down."""
    global running
    running = False
    # broadcast the shutdown command over every control pipe
    for pipe in pipes.values():
        pipe.send('shutdown')
    dprint('PlexConnect', 0, "Shutting down.")
def sighandler_shutdown(signum, frame):
    """SIGINT/SIGTERM handler: trigger exactly one orderly shutdown."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)  # we heard you!
    cmdShutdown()
if __name__=="__main__":
    # route CTRL+C / termination signals through the orderly shutdown path
    signal.signal(signal.SIGINT, sighandler_shutdown)
    signal.signal(signal.SIGTERM, sighandler_shutdown)
    dprint('PlexConnect', 0, "***")
    dprint('PlexConnect', 0, "PlexConnect")
    dprint('PlexConnect', 0, "Press CTRL-C to shut down.")
    dprint('PlexConnect', 0, "***")
    # main loop: idle until a signal or a child failure clears `running`
    running = startup()
    while running:
        running = run()
    shutdown()
|
door_adapter.py | import sys
import yaml
import argparse
import time
import threading
import rclpy
from DoorClientAPI import DoorClientAPI
from rclpy.node import Node
from rclpy.time import Time
from rmf_door_msgs.msg import DoorRequest, DoorState, DoorMode
###############################################################################
class DoorAdapter(Node):
    """ROS 2 node bridging RMF door requests to a vendor door API.

    Publishes DoorState periodically and forwards DoorRequest open/close
    commands to the door hardware through DoorClientAPI.
    """
    def __init__(self,config_yaml):
        """Read the config, connect to the door API and set up pub/sub/timer."""
        super().__init__('door_adapter')
        self.get_logger().info('Starting door adapter...')
        # Get value from config file
        self.door_name = config_yaml['door']['name']
        # whether the vendor API exposes an explicit close command
        self.door_close_feature = config_yaml['door']['door_close_feature']
        # seconds between repeated open commands while an open request is active
        self.door_signal_period = config_yaml['door']['door_signal_period']
        self.door_state_publish_period = config_yaml['door_publisher']['door_state_publish_period']
        url = config_yaml['door']['api_endpoint']
        api_key = config_yaml['door']['header_key']
        api_value = config_yaml['door']['header_value']
        door_id = config_yaml['door']['door_id']
        door_pub = config_yaml['door_publisher']
        door_sub = config_yaml['door_subscriber']
        self.api = DoorClientAPI(url,api_key,api_value,door_id)
        assert self.api.connected, "Unable to establish connection with door"
        # default door state - closed mode
        self.door_mode = DoorMode.MODE_CLOSED
        # open door flag
        self.open_door = False
        # when False, time_cb skips polling the API and publishes the cached mode
        self.check_status = False
        self.door_states_pub = self.create_publisher(
            DoorState, door_pub['topic_name'], 10)
        self.door_request_sub = self.create_subscription(
            DoorRequest, door_sub['topic_name'], self.door_request_cb, 10)
        self.periodic_timer = self.create_timer(
            self.door_state_publish_period, self.time_cb)
    def door_open_command_request(self):
        """Worker-thread loop: re-send the open command while a request is active."""
        # assume API doesn't have close door API
        # Once the door command is posted to the door API,
        # the door will be opened and then close after 5 secs
        while self.open_door:
            success = self.api.open_door()
            if success:
                self.get_logger().info(f"Request to open door [{self.door_name}] is successful")
            else:
                self.get_logger().warning(f"Request to open door [{self.door_name}] is unsuccessful")
            time.sleep(self.door_signal_period)
    def time_cb(self):
        """Timer callback: poll the door mode (when needed) and publish DoorState."""
        if self.check_status:
            self.door_mode = self.api.get_mode()
            # when door request is to close door and the door state is close
            # will assume the door state is close until next door open request
            # This implement to reduce the number of API called
            if self.door_mode == DoorMode.MODE_CLOSED and not self.open_door:
                self.check_status = False
        state_msg = DoorState()
        state_msg.door_time = self.get_clock().now().to_msg()
        # publish states of the door
        state_msg.door_name = self.door_name
        state_msg.current_mode.value = self.door_mode
        self.door_states_pub.publish(state_msg)
    def door_request_cb(self, msg: DoorRequest):
        """Subscription callback: start/stop sending open commands to the API."""
        # when door node receive open request, the door adapter will send open command to API
        # If door node receive close request, the door adapter will stop sending open command to API
        # check DoorRequest msg whether the door name of the request is same as the current door. If not, ignore the request
        if msg.door_name == self.door_name:
            self.get_logger().info(f"Door mode [{msg.requested_mode.value}] requested by {msg.requester_id}")
            if msg.requested_mode.value == DoorMode.MODE_OPEN:
                # open door implementation
                self.open_door = True
                self.check_status = True
                if self.door_close_feature:
                    self.api.open_door()
                else:
                    # no close API: keep re-sending open commands from a thread
                    t = threading.Thread(target = self.door_open_command_request)
                    t.start()
            elif msg.requested_mode.value == DoorMode.MODE_CLOSED:
                # close door implementation
                self.open_door = False
                self.get_logger().info('Close Command to door received')
                if self.door_close_feature:
                    self.api.close_door()
            else:
                self.get_logger().error('Invalid door mode requested. Ignoring...')
###############################################################################
def main(argv=sys.argv):
    """Parse CLI args, load the YAML config and spin the door adapter node.

    Raises:
        SystemExit: when the required --config_file argument is missing.
    """
    rclpy.init(args=argv)
    args_without_ros = rclpy.utilities.remove_ros_args(argv)
    parser = argparse.ArgumentParser(
        prog="door_adapter",
        description="Configure and spin up door adapter for door ")
    parser.add_argument("-c", "--config_file", type=str, required=True,
                        help="Path to the config.yaml file for this door adapter")
    args = parser.parse_args(args_without_ros[1:])
    config_path = args.config_file
    # Load config and nav graph yamls
    with open(config_path, "r") as f:
        config_yaml = yaml.safe_load(f)
    door_adapter = DoorAdapter(config_yaml)
    try:
        rclpy.spin(door_adapter)
    except KeyboardInterrupt:
        pass  # allow CTRL+C to stop the node without a traceback
    finally:
        # BUGFIX: cleanup was previously skipped on interrupt/exception,
        # leaking the node and the rclpy context
        door_adapter.destroy_node()
        rclpy.shutdown()
if __name__ == '__main__':
    main(sys.argv)
|
dense_update_ops_test.py | <<<<<<< HEAD
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Assign*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
=======
"""Tests for tensorflow.ops.tf.Assign*."""
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
def _initAssignFetch(self, x, y, use_gpu=False):
"""Initialize a param to init and update it with y."""
super(AssignOpTest, self).setUp()
<<<<<<< HEAD
with self.cached_session(use_gpu=use_gpu):
p = variables.Variable(x)
assign = state_ops.assign(p, y)
p.initializer.run()
new_value = self.evaluate(assign)
return self.evaluate(p), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
with self.cached_session(use_gpu=use_gpu):
p = variables.Variable(x)
add = state_ops.assign_add(p, y)
p.initializer.run()
new_value = self.evaluate(add)
return self.evaluate(p), new_value
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
with self.cached_session(use_gpu=use_gpu):
p = variables.Variable(x)
sub = state_ops.assign_sub(p, y)
p.initializer.run()
new_value = self.evaluate(sub)
return self.evaluate(p), new_value
=======
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
assign = tf.assign(p, y)
p.initializer.run()
new_value = assign.eval()
return p.eval(), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
add = tf.assign_add(p, y)
p.initializer.run()
new_value = add.eval()
return p.eval(), new_value
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
sub = tf.assign_sub(p, y)
p.initializer.run()
new_value = sub.eval()
return p.eval(), new_value
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
x = np.zeros(vals.shape).astype(dtype)
y = vals.astype(dtype)
var_value, op_value = self._initAssignFetch(x, y, use_gpu=False)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=False)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
<<<<<<< HEAD
if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
=======
if tf.test.IsBuiltWithCuda() and dtype in [np.float32, np.float64]:
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=True)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
<<<<<<< HEAD
@test_util.run_deprecated_v1
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
@test_util.run_v1_only("b/120545219")
def testAssignNonStrictShapeChecking(self):
with self.cached_session():
data = array_ops.fill([1024, 1024], 0)
p = variables.VariableV1([1])
a = state_ops.assign(p, data, validate_shape=False)
a.op.run()
self.assertAllEqual(p.eval(), self.evaluate(data))
# Assign to yet another shape
data2 = array_ops.fill([10, 10], 1)
a2 = state_ops.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), self.evaluate(data2))
@test_util.run_v1_only("b/120545219")
def testInitRequiredAssignAdd(self):
with self.cached_session():
p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32)
a = state_ops.assign_add(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
@test_util.run_v1_only("b/120545219")
def testInitRequiredAssignSub(self):
with self.cached_session():
p = variables.VariableV1(array_ops.fill([1024, 1024], 1), dtypes.int32)
a = state_ops.assign_sub(p, array_ops.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
if __name__ == "__main__":
test.main()
=======
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testAssignNonStrictShapeChecking(self):
with self.test_session():
data = tf.fill([1024, 1024], 0)
p = tf.Variable([1])
a = tf.assign(p, data, validate_shape=False)
a.op.run()
self.assertAllEqual(p.eval(), data.eval())
# Assign to yet another shape
data2 = tf.fill([10, 10], 1)
a2 = tf.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), data2.eval())
def testInitRequiredAssignAdd(self):
with self.test_session():
p = tf.Variable(tf.fill([1024, 1024], 1),
tf.int32)
a = tf.assign_add(p, tf.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
def testInitRequiredAssignSub(self):
with self.test_session():
p = tf.Variable(tf.fill([1024, 1024], 1),
tf.int32)
a = tf.assign_sub(p, tf.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign
# data race and must run without TSAN.
def testParallelUpdateWithLocking(self):
with self.test_session() as sess:
zeros_t = tf.fill([1024, 1024], 0.0)
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(zeros_t)
adds = [tf.assign_add(p, ones_t, use_locking=True)
for _ in range(20)]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(target=run_add, args=(add_op,)) for add_op in adds]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.[...].testParallelAssignWithoutLocking,
# which contains a benign data race and must run without TSAN.
def testParallelAssignWithLocking(self):
with self.test_session() as sess:
zeros_t = tf.fill([1024, 1024], 0.0)
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(zeros_t)
assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
use_locking=True)
for i in range(1, 21)]
p.initializer.run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [self.checkedThread(target=run_assign, args=(assign_op,))
for assign_op in assigns]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
tf.test.main()
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
|
app.py | """
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import logging.config
from signal import signal, SIGINT
from sys import exit
import os
import threading
import time
import json
from flask import Flask, Response, jsonify
from flask_cors import CORS
import amqp.amqp_handler as amqp_handler
from amqp.amqp import AmqpClient
from commons.esclient import EsClient
from commons import model_chooser
from utils import utils
from service.cluster_service import ClusterService
from service.auto_analyzer_service import AutoAnalyzerService
from service.analyzer_service import AnalyzerService
from service.suggest_service import SuggestService
from service.suggest_info_service import SuggestInfoService
from service.search_service import SearchService
from service.clean_index_service import CleanIndexService
from service.namespace_finder_service import NamespaceFinderService
from service.delete_index_service import DeleteIndexService
from service.retraining_service import RetrainingService
from service.suggest_patterns_service import SuggestPatternsService
# Application-level configuration, assembled once at import time from
# environment variables (defaults target a docker-compose deployment).
# NOTE: json.loads(os.getenv(...).lower()) parses "true"/"false" env values
# into real Python booleans; an unparsable value raises at import time.
APP_CONFIG = {
    "esHost": os.getenv("ES_HOSTS", "http://elasticsearch:9200").strip("/").strip("\\"),
    "esUser": os.getenv("ES_USER", "").strip(),
    "esPassword": os.getenv("ES_PASSWORD", "").strip(),
    "logLevel": os.getenv("LOGGING_LEVEL", "DEBUG").strip(),
    "amqpUrl": os.getenv("AMQP_URL", "").strip("/").strip("\\"),
    "exchangeName": os.getenv("AMQP_EXCHANGE_NAME", "analyzer"),
    "analyzerPriority": int(os.getenv("ANALYZER_PRIORITY", "1")),
    "analyzerIndex": json.loads(os.getenv("ANALYZER_INDEX", "true").lower()),
    "analyzerLogSearch": json.loads(os.getenv("ANALYZER_LOG_SEARCH", "true").lower()),
    "analyzerSuggest": json.loads(os.getenv("ANALYZER_SUGGEST", "true").lower()),
    "analyzerCluster": json.loads(os.getenv("ANALYZER_CLUSTER", "true").lower()),
    "turnOffSslVerification": json.loads(os.getenv("ES_TURN_OFF_SSL_VERIFICATION", "false").lower()),
    "esVerifyCerts": json.loads(os.getenv("ES_VERIFY_CERTS", "false").lower()),
    "esUseSsl": json.loads(os.getenv("ES_USE_SSL", "false").lower()),
    "esSslShowWarn": json.loads(os.getenv("ES_SSL_SHOW_WARN", "false").lower()),
    "esCAcert": os.getenv("ES_CA_CERT", ""),
    "esClientCert": os.getenv("ES_CLIENT_CERT", ""),
    "esClientKey": os.getenv("ES_CLIENT_KEY", ""),
    "minioHost": os.getenv("MINIO_SHORT_HOST", "minio:9000"),
    "minioAccessKey": os.getenv("MINIO_ACCESS_KEY", "minio"),
    "minioSecretKey": os.getenv("MINIO_SECRET_KEY", "minio123"),
    # Filled in later from the VERSION file (see read_version()).
    "appVersion": "",
    "binaryStoreType": os.getenv("ANALYZER_BINARYSTORE_TYPE", "minio"),
    "minioBucketPrefix": os.getenv("ANALYZER_BINARYSTORE_BUCKETPREFIX", "prj-"),
    "minioRegion": os.getenv("ANALYZER_BINARYSTORE_MINIO_REGION", None),
    # "" (regular instance) or "train" (model-retraining instance); see init_amqp().
    "instanceTaskType": os.getenv("INSTANCE_TASK_TYPE", "").strip(),
    "filesystemDefaultPath": os.getenv("FILESYSTEM_DEFAULT_PATH", "storage").strip(),
    "esChunkNumber": int(os.getenv("ES_CHUNK_NUMBER", "1000")),
    "esChunkNumberUpdateClusters": int(os.getenv("ES_CHUNK_NUMBER_UPDATE_CLUSTERS", "500")),
    "esProjectIndexPrefix": os.getenv("ES_PROJECT_INDEX_PREFIX", "").strip(),
    "analyzerHttpPort": int(os.getenv("ANALYZER_HTTP_PORT", "5001")),
    "analyzerPathToLog": os.getenv("ANALYZER_FILE_LOGGING_PATH", "/tmp/config.log")
}
# Search/analysis tuning parameters, read from environment variables.
# The empty "...Folder"/"...Config" entries are populated later by
# read_model_settings() from model_settings.json.
SEARCH_CONFIG = {
    "MinShouldMatch": os.getenv("ES_MIN_SHOULD_MATCH", "80%"),
    "BoostAA": float(os.getenv("ES_BOOST_AA", "-8.0")),
    "BoostLaunch": float(os.getenv("ES_BOOST_LAUNCH", "4.0")),
    "BoostUniqueID": float(os.getenv("ES_BOOST_UNIQUE_ID", "8.0")),
    "MaxQueryTerms": int(os.getenv("ES_MAX_QUERY_TERMS", "50")),
    "SearchLogsMinSimilarity": float(os.getenv("ES_LOGS_MIN_SHOULD_MATCH", "0.95")),
    "MinWordLength": int(os.getenv("ES_MIN_WORD_LENGTH", "2")),
    "TimeWeightDecay": float(os.getenv("ES_TIME_WEIGHT_DECAY", "0.95")),
    "PatternLabelMinPercentToSuggest": float(os.getenv("PATTERN_LABEL_MIN_PERCENT", "0.9")),
    "PatternLabelMinCountToSuggest": int(os.getenv("PATTERN_LABEL_MIN_COUNT", "5")),
    "PatternMinCountToSuggest": int(os.getenv("PATTERN_MIN_COUNT", "10")),
    "MaxLogsForDefectTypeModel": int(os.getenv("MAX_LOGS_FOR_DEFECT_TYPE_MODEL", "10000")),
    # min() caps the env-provided probabilities at a safe upper bound.
    "ProbabilityForCustomModelSuggestions": min(
        0.8, float(os.getenv("PROB_CUSTOM_MODEL_SUGGESTIONS", "0.7"))),
    "ProbabilityForCustomModelAutoAnalysis": min(
        1.0, float(os.getenv("PROB_CUSTOM_MODEL_AUTO_ANALYSIS", "0.5"))),
    "BoostModelFolder": "",
    "SuggestBoostModelFolder": "",
    "SimilarityWeightsFolder": "",
    "GlobalDefectTypeModelFolder": "",
    "RetrainSuggestBoostModelConfig": "",
    "RetrainAutoBoostModelConfig": "",
    "MaxSuggestionsNumber": int(os.getenv("MAX_SUGGESTIONS_NUMBER", "3")),
    "AutoAnalysisTimeout": int(os.getenv("AUTO_ANALYSIS_TIMEOUT", "300")),
    "MaxAutoAnalysisItemsToProcess": int(os.getenv("MAX_AUTO_ANALYSIS_ITEMS_TO_PROCESS", "4000"))
}
def create_application():
    """Build and return the Flask application serving the analyzer HTTP API."""
    return Flask(__name__)
def create_thread(func, args):
    """Run *func* with positional *args* in a new thread.

    The thread is started before being returned, so callers only need to
    keep the handle if they intend to join it later.
    """
    worker = threading.Thread(target=func, args=args)
    worker.start()
    return worker
def declare_exchange(channel, config):
    """Declare the analyzer direct exchange on RabbitMQ.

    Exchange metadata (capabilities and version) is attached via the
    ``arguments`` mapping. Returns True on success, False when the
    declaration failed (the error is logged, not raised).
    """
    exchange_name = config["exchangeName"]
    logger.info("ExchangeName: %s", exchange_name)
    try:
        # Built inside the try so a missing config key is also reported
        # as a declaration failure rather than propagating.
        capability_arguments = {
            "analyzer": exchange_name,
            "analyzer_index": config["analyzerIndex"],
            "analyzer_priority": config["analyzerPriority"],
            "analyzer_log_search": config["analyzerLogSearch"],
            "analyzer_suggest": config["analyzerSuggest"],
            "analyzer_cluster": config["analyzerCluster"],
            "version": config["appVersion"],
        }
        channel.exchange_declare(exchange=exchange_name, exchange_type='direct',
                                 durable=False, auto_delete=True, internal=False,
                                 arguments=capability_arguments)
    except Exception as err:
        logger.error("Failed to declare exchange")
        logger.error(err)
        return False
    logger.info("Exchange '%s' has been declared", exchange_name)
    return True
def _start_consumer(queue_name, callback):
    """Spawn a consumer thread for *queue_name*, dispatching messages to *callback*.

    Each consumer gets its own AmqpClient connection because AMQP connections
    are used from a dedicated thread per queue in this application.
    """
    return create_thread(
        AmqpClient(APP_CONFIG["amqpUrl"]).receive,
        (APP_CONFIG["exchangeName"], queue_name, True, False, callback))


def _amqp_callback(func, **handler_kwargs):
    """Build a queue callback routing the message body to *func*.

    *handler_kwargs* are forwarded to amqp_handler.handle_amqp_request
    (prepare_data_func, prepare_response_data, publish_result, ...).
    """
    return lambda channel, method, props, body: amqp_handler.handle_amqp_request(
        channel, method, props, body, func, **handler_kwargs)


def _inner_amqp_callback(func):
    """Build a queue callback for internal requests handled by handle_inner_amqp_request."""
    return lambda channel, method, props, body: amqp_handler.handle_inner_amqp_request(
        channel, method, props, body, func)


def init_amqp(_amqp_client):
    """Initialize rabbitmq queues, exchange and start threads for queue messages processing.

    Returns the list of started consumer threads, or None when declaring
    the AMQP objects raised an error. A "train" instance consumes only the
    model-retraining queue; a regular instance consumes all analyzer queues.
    """
    with _amqp_client.connection.channel() as channel:
        try:
            # NOTE: declare_exchange() reports failure via its return value,
            # which is deliberately ignored here (same as the original flow);
            # this except only fires if the call itself raises.
            declare_exchange(channel, APP_CONFIG)
        except Exception as err:
            logger.error("Failed to declare amqp objects")
            logger.error(err)
            return
    threads = []
    _model_chooser = model_chooser.ModelChooser(APP_CONFIG, SEARCH_CONFIG)
    if APP_CONFIG["instanceTaskType"] == "train":
        _retraining_service = RetrainingService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
        threads.append(_start_consumer(
            "train_models",
            _inner_amqp_callback(_retraining_service.train_models)))
    else:
        _es_client = EsClient(APP_CONFIG, SEARCH_CONFIG)
        _auto_analyzer_service = AutoAnalyzerService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
        _delete_index_service = DeleteIndexService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
        _clean_index_service = CleanIndexService(APP_CONFIG, SEARCH_CONFIG)
        _analyzer_service = AnalyzerService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
        _suggest_service = SuggestService(_model_chooser, APP_CONFIG, SEARCH_CONFIG)
        _suggest_info_service = SuggestInfoService(APP_CONFIG, SEARCH_CONFIG)
        _search_service = SearchService(APP_CONFIG, SEARCH_CONFIG)
        _cluster_service = ClusterService(APP_CONFIG, SEARCH_CONFIG)
        _namespace_finder_service = NamespaceFinderService(APP_CONFIG, SEARCH_CONFIG)
        _suggest_patterns_service = SuggestPatternsService(APP_CONFIG, SEARCH_CONFIG)
        # Routing table: queue name -> message callback. One consumer thread
        # is started per entry; this replaces 20 copy-pasted thread setups.
        consumers = [
            ("index", _amqp_callback(
                _es_client.index_logs,
                prepare_response_data=amqp_handler.prepare_index_response_data)),
            ("analyze", _amqp_callback(
                _auto_analyzer_service.analyze_logs,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)),
            ("delete", _amqp_callback(
                _delete_index_service.delete_index,
                prepare_data_func=amqp_handler.prepare_delete_index,
                prepare_response_data=amqp_handler.output_result)),
            ("clean", _amqp_callback(
                _clean_index_service.delete_logs,
                prepare_data_func=amqp_handler.prepare_clean_index,
                prepare_response_data=amqp_handler.output_result)),
            ("search", _amqp_callback(
                _search_service.search_logs,
                prepare_data_func=amqp_handler.prepare_search_logs,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)),
            ("suggest", _amqp_callback(
                _suggest_service.suggest_items,
                prepare_data_func=amqp_handler.prepare_test_item_info,
                prepare_response_data=amqp_handler.prepare_analyze_response_data)),
            ("cluster", _amqp_callback(
                _cluster_service.find_clusters,
                prepare_data_func=amqp_handler.prepare_launch_info,
                prepare_response_data=amqp_handler.prepare_index_response_data)),
            ("stats_info", _inner_amqp_callback(_es_client.send_stats_info)),
            ("namespace_finder", _amqp_callback(
                _namespace_finder_service.update_chosen_namespaces,
                publish_result=False)),
            ("suggest_patterns", _amqp_callback(
                _suggest_patterns_service.suggest_patterns,
                prepare_data_func=amqp_handler.prepare_delete_index,
                prepare_response_data=amqp_handler.prepare_index_response_data)),
            ("index_suggest_info", _amqp_callback(
                _suggest_info_service.index_suggest_info,
                prepare_data_func=amqp_handler.prepare_suggest_info_list,
                prepare_response_data=amqp_handler.prepare_index_response_data)),
            ("remove_suggest_info", _amqp_callback(
                _suggest_info_service.remove_suggest_info,
                prepare_data_func=amqp_handler.prepare_delete_index,
                prepare_response_data=amqp_handler.output_result)),
            ("update_suggest_info", _amqp_callback(
                _suggest_info_service.update_suggest_info,
                prepare_data_func=lambda x: x)),
            ("remove_models", _amqp_callback(
                _analyzer_service.remove_models,
                prepare_data_func=lambda x: x,
                prepare_response_data=amqp_handler.output_result)),
            ("get_model_info", _amqp_callback(
                _analyzer_service.get_model_info,
                prepare_data_func=lambda x: x)),
            ("defect_update", _amqp_callback(
                _es_client.defect_update,
                prepare_data_func=lambda x: x,
                prepare_response_data=amqp_handler.prepare_search_response_data)),
            ("item_remove", _amqp_callback(
                _clean_index_service.delete_test_items,
                prepare_data_func=lambda x: x,
                prepare_response_data=amqp_handler.output_result)),
            ("launch_remove", _amqp_callback(
                _clean_index_service.delete_launches,
                prepare_data_func=lambda x: x,
                prepare_response_data=amqp_handler.output_result)),
            ("remove_by_launch_start_time", _amqp_callback(
                _clean_index_service.remove_by_launch_start_time,
                prepare_data_func=lambda x: x,
                prepare_response_data=amqp_handler.output_result)),
            ("remove_by_log_time", _amqp_callback(
                _clean_index_service.remove_by_log_time,
                prepare_data_func=lambda x: x,
                prepare_response_data=amqp_handler.output_result)),
        ]
        for queue_name, callback in consumers:
            threads.append(_start_consumer(queue_name, callback))
    return threads
def read_version():
    """Return the application build version from the VERSION file, or '' when absent."""
    version_filename = "VERSION"
    if not os.path.exists(version_filename):
        return ""
    with open(version_filename, "r") as version_file:
        return version_file.read().strip()
def read_model_settings():
    """Load model folder paths from model_settings.json into SEARCH_CONFIG."""
    model_settings = utils.read_json_file("", "model_settings.json", to_json=True)
    # (SEARCH_CONFIG key, model_settings.json key) pairs, applied in order.
    folder_keys = [
        ("BoostModelFolder", "BOOST_MODEL_FOLDER"),
        ("SuggestBoostModelFolder", "SUGGEST_BOOST_MODEL_FOLDER"),
        ("SimilarityWeightsFolder", "SIMILARITY_WEIGHTS_FOLDER"),
        ("GlobalDefectTypeModelFolder", "GLOBAL_DEFECT_TYPE_MODEL_FOLDER"),
        ("RetrainSuggestBoostModelConfig", "RETRAIN_SUGGEST_BOOST_MODEL_CONFIG"),
        ("RetrainAutoBoostModelConfig", "RETRAIN_AUTO_BOOST_MODEL_CONFIG"),
    ]
    for config_key, settings_key in folder_keys:
        SEARCH_CONFIG[config_key] = model_settings[settings_key]
# --- module initialization (runs at import time) ---
# Configure file logging from logging.conf next to this module, then clamp
# global verbosity according to the configured log level.
log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.conf')
logging.config.fileConfig(log_file_path, defaults={'logfilename': APP_CONFIG["analyzerPathToLog"]})
if APP_CONFIG["logLevel"].lower() == "debug":
    logging.disable(logging.NOTSET)
elif APP_CONFIG["logLevel"].lower() == "info":
    logging.disable(logging.DEBUG)
else:
    logging.disable(logging.INFO)
logger = logging.getLogger("analyzerApp")
APP_CONFIG["appVersion"] = read_version()
# Elasticsearch client used by the health endpoint below.
es_client = EsClient(APP_CONFIG, SEARCH_CONFIG)
read_model_settings()
application = create_application()
CORS(application)
threads = []
@application.route('/', methods=['GET'])
def get_health_status():
    """Health endpoint: 200 when Elasticsearch is reachable, 503 otherwise."""
    status = ""
    if not es_client.is_healthy(APP_CONFIG["esHost"]):
        status += "Elasticsearch is not healthy;"
    if status:
        logger.error("Analyzer health check status failed: %s", status)
        return Response(json.dumps({"status": status}), status=503, mimetype='application/json')
    return jsonify({"status": "healthy"})
def handler(signal_received, frame):
    """SIGINT handler: announce shutdown and exit the process."""
    print('The analyzer has stopped')
    exit(0)  # sys.exit (imported via `from sys import exit`)
def start_http_server():
    """Run the Flask HTTP API; blocks until the server stops."""
    application.logger.setLevel(logging.INFO)
    logger.info("Started http server")
    application.run(host='0.0.0.0', port=APP_CONFIG["analyzerHttpPort"], use_reloader=False)
signal(SIGINT, handler)
# NOTE(review): `threads` was already initialized above; this re-assignment is redundant.
threads = []
logger.info("The analyzer has started")
# Retry until an AMQP connection is established; consumer threads are then
# started by init_amqp(). Runs at import time, not only under __main__.
while True:
    try:
        logger.info("Starting waiting for AMQP connection")
        try:
            amqp_client = AmqpClient(APP_CONFIG["amqpUrl"])
        except Exception as err:
            logger.error("Amqp connection was not established")
            logger.error(err)
            time.sleep(10)
            continue
        threads = init_amqp(amqp_client)
        logger.info("Analyzer has started")
        break
    except Exception as err:
        logger.error("The analyzer has failed")
        logger.error(err)
if __name__ == '__main__':
    logger.info("Program started")
    # Blocks serving HTTP until the process is stopped (e.g. SIGINT).
    start_http_server()
    logger.info("The analyzer has finished")
    exit(0)
|
inference_server.py | import zmq
import sys
import gflags
import time
import multiprocessing
import threading
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
from .sparsify_traj import find_sparsifier
# Command-line flags: ZeroMQ frontend bind address, path to the sparsifier
# weights file, and the number of backend worker processes.
gflags.DEFINE_string('addr', '', '')
gflags.DEFINE_string('weights_file', '', '')
gflags.DEFINE_integer('n_worker', 1, '')
FLAGS = gflags.FLAGS
# Parse command-line flags at import time.
FLAGS(sys.argv)
def backend_process(listen_addr):
    """Worker-process entry point: serve sparsifier RPCs over a zmq REP socket.

    Connects to the DEALER backend at *listen_addr*, instantiates the
    sparsifier selected by ``FLAGS.weights_file``, and answers
    msgpack-encoded requests until an 'exit' message arrives.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.connect(listen_addr)
    mp = find_sparsifier(FLAGS.weights_file)(FLAGS.weights_file)
    def exec_cmd(msg):
        """Dispatch one [func_name, *args] message to the sparsifier instance."""
        func_name = msg[0]
        args = msg[1:]
        try:
            ret = getattr(mp, func_name)(*args)
            return ret
        except BaseException:
            # Was a bare `except:`; made explicit so the intent (log the
            # failing message for *any* error, then re-raise) is visible.
            print('Failed to execute message: %r' % msg)
            raise
    while True:
        msg = msgpack.unpackb(socket.recv(), raw=False)
        ret = None
        if msg[0] == 'exit':
            # NOTE(review): the 'exit' ack uses send_pyobj (pickle) while all
            # other replies are msgpack — kept as-is since clients expect it.
            socket.send_pyobj('ok')
            break
        elif msg[0] == 'cmd_list':
            # Batched commands: execute in order, reply with the last result.
            for m in msg[1:]:
                ret = exec_cmd(m)
        else:
            ret = exec_cmd(msg)
        socket.send(msgpack.packb(ret, use_bin_type=True))
    socket.disconnect(listen_addr)
    socket.close()
    context.term()
    print('reachability inference server backend terminated')
if __name__ == '__main__':
    # ROUTER frontend (clients) <-> DEALER backend (worker processes),
    # bridged by zmq.device below. HWM 0 = unbounded queues; LINGER 0 =
    # drop pending messages on close.
    context = zmq.Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.setsockopt(zmq.RCVHWM, 0)
    frontend.setsockopt(zmq.SNDHWM, 0)
    frontend.setsockopt(zmq.LINGER, 0)
    frontend.bind(FLAGS.addr)
    backend = context.socket(zmq.DEALER)
    backend.setsockopt(zmq.RCVHWM, 0)
    backend.setsockopt(zmq.SNDHWM, 0)
    backend.setsockopt(zmq.LINGER, 0)
    # Unique ipc endpoint per run so concurrent servers don't collide.
    backend_addr = 'ipc:///tmp/reachability_inference-backend-%s' % str(time.time())
    backend.bind(backend_addr)
    # Spawn one worker process per requested worker; each runs backend_process.
    backend_procs = []
    for i in range(FLAGS.n_worker):
        proc = multiprocessing.Process(target=backend_process, args=(backend_addr,))
        proc.start()
        backend_procs.append(proc)
    def monitor_backend():
        """Wait for all workers to exit, then terminate the zmq context.

        Terminating the context makes zmq.device below raise
        ContextTerminated, which unblocks the main thread.
        """
        print('monitor thread started')
        for proc in backend_procs:
            proc.join()
        print('all backend procs terminated')
        context.term()
    monitor_thread = threading.Thread(target=monitor_backend)
    monitor_thread.start()
    try:
        # Blocking proxy loop shuttling messages frontend <-> backend.
        zmq.device(zmq.QUEUE, frontend, backend)
    except zmq.error.ContextTerminated:
        frontend.close()
        backend.close()
        print('motion policy inference server frontend terminated')
|
keylime_agent.py | #!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import asyncio
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import threading
from urllib.parse import urlparse
import base64
import configparser
import uuid
import os
import socket
import sys
import time
import hashlib
import zipfile
import io
import importlib
import shutil
import simplejson as json
from keylime import config
from keylime import keylime_logging
from keylime import cmd_exec
from keylime import crypto
from keylime import openstack
from keylime import revocation_notifier
from keylime import registrar_client
from keylime import secure_mount
from keylime.tpm import tpm_obj
from keylime.tpm.tpm_abstract import TPM_Utilities
# Module-level logger for the cloud agent.
logger = keylime_logging.init_logging('cloudagent')
# Acquire the TPM object; need_hw_tpm=True requires a hardware TPM.
tpm = tpm_obj.getTPM(need_hw_tpm=True)
tpm_version = tpm.VERSION
# Lock protecting the shared U/V key-fragment state on CloudAgentHTTPServer;
# required because requests are handled in multiple threads.
uvLock = threading.Lock()
class Handler(BaseHTTPRequestHandler):
    """HTTP request handler for the keylime cloud agent.

    Serves key-verification/pubkey/quote GETs and U/V key-fragment POSTs
    from the Tenant and the Cloud Verifier. Shared state (keys, payload)
    lives on ``self.server`` (a CloudAgentHTTPServer).
    """
    parsed_path = ''
    def do_HEAD(self):
        """Not supported"""
        config.echo_json_response(self, 405, "HEAD not supported")
    def do_GET(self):
        """This method services the GET request typically from either the Tenant or the Cloud Verifier.
        Only tenant and cloudverifier uri's are supported. Both requests require a nonce parameter.
        The Cloud verifier requires an additional mask paramter. If the uri or parameters are incorrect, a 400 response is returned.
        """
        logger.info('GET invoked from ' + str(self.client_address) + ' with uri:' + self.path)
        rest_params = config.get_restful_params(self.path)
        if rest_params is None:
            config.echo_json_response(
                self, 405, "Not Implemented: Use /keys/ or /quotes/ interfaces")
            return
        if "keys" in rest_params and rest_params['keys'] == 'verify':
            # Challenge/response proof that the agent holds the bootstrap key K.
            if self.server.K is None:
                logger.info(
                    'GET key challenge returning 400 response. bootstrap key not available')
                config.echo_json_response(
                    self, 400, "Bootstrap key not yet available.")
                return
            challenge = rest_params['challenge']
            response = {}
            response['hmac'] = crypto.do_hmac(self.server.K, challenge)
            config.echo_json_response(self, 200, "Success", response)
            logger.info('GET key challenge returning 200 response.')
        # If agent pubkey requested
        elif "keys" in rest_params and rest_params["keys"] == "pubkey":
            response = {}
            response['pubkey'] = self.server.rsapublickey_exportable
            config.echo_json_response(self, 200, "Success", response)
            logger.info('GET pubkey returning 200 response.')
            return
        elif "quotes" in rest_params:
            nonce = rest_params['nonce']
            pcrmask = rest_params['mask'] if 'mask' in rest_params else None
            vpcrmask = rest_params['vmask'] if 'vmask' in rest_params else None
            # if the query is not messed up
            if nonce is None:
                logger.warning(
                    'GET quote returning 400 response. nonce not provided as an HTTP parameter in request')
                config.echo_json_response(
                    self, 400, "nonce not provided as an HTTP parameter in request")
                return
            # Sanitization assurance (for tpm.run() tasks below)
            if not (nonce.isalnum() and (pcrmask is None or pcrmask.isalnum()) and (vpcrmask is None or vpcrmask.isalnum())):
                logger.warning(
                    'GET quote returning 400 response. parameters should be strictly alphanumeric')
                config.echo_json_response(
                    self, 400, "parameters should be strictly alphanumeric")
                return
            # identity quotes are always shallow
            hash_alg = tpm.defaults['hash']
            if not tpm.is_vtpm() or rest_params["quotes"] == 'identity':
                quote = tpm.create_quote(
                    nonce, self.server.rsapublickey_exportable, pcrmask, hash_alg)
                imaMask = pcrmask
            else:
                # Deep quote through the vTPM; IMA entries are selected by vmask.
                quote = tpm.create_deep_quote(
                    nonce, self.server.rsapublickey_exportable, vpcrmask, pcrmask)
                imaMask = vpcrmask
            # Allow for a partial quote response (without pubkey)
            enc_alg = tpm.defaults['encrypt']
            sign_alg = tpm.defaults['sign']
            if "partial" in rest_params and (rest_params["partial"] is None or int(rest_params["partial"], 0) == 1):
                response = {
                    'quote': quote,
                    'tpm_version': tpm_version,
                    'hash_alg': hash_alg,
                    'enc_alg': enc_alg,
                    'sign_alg': sign_alg,
                }
            else:
                response = {
                    'quote': quote,
                    'tpm_version': tpm_version,
                    'hash_alg': hash_alg,
                    'enc_alg': enc_alg,
                    'sign_alg': sign_alg,
                    'pubkey': self.server.rsapublickey_exportable,
                }
            # return a measurement list if available
            if TPM_Utilities.check_mask(imaMask, config.IMA_PCR):
                if not os.path.exists(config.IMA_ML):
                    # NOTE(review): logger.warn is deprecated in favor of logger.warning.
                    logger.warn(
                        "IMA measurement list not available: %s" % (config.IMA_ML))
                else:
                    with open(config.IMA_ML, 'r') as f:
                        ml = f.read()
                    response['ima_measurement_list'] = ml
            config.echo_json_response(self, 200, "Success", response)
            logger.info('GET %s quote returning 200 response.' %
                        (rest_params["quotes"]))
            return
        else:
            logger.warning(
                'GET returning 400 response. uri not supported: ' + self.path)
            config.echo_json_response(self, 400, "uri not supported")
            return
    def do_POST(self):
        """This method services the POST request typically from either the Tenant or the Cloud Verifier.
        Only tenant and cloudverifier uri's are supported. Both requests require a nonce parameter.
        The Cloud verifier requires an additional mask parameter. If the uri or parameters are incorrect, a 400 response is returned.
        """
        rest_params = config.get_restful_params(self.path)
        if rest_params is None:
            config.echo_json_response(
                self, 405, "Not Implemented: Use /keys/ interface")
            return
        content_length = int(self.headers.get('Content-Length', 0))
        if content_length <= 0:
            logger.warning(
                'POST returning 400 response, expected content in message. url: ' + self.path)
            config.echo_json_response(self, 400, "expected content in message")
            return
        post_body = self.rfile.read(content_length)
        json_body = json.loads(post_body)
        # The key fragment arrives RSA-encrypted to this agent's public key.
        b64_encrypted_key = json_body['encrypted_key']
        decrypted_key = crypto.rsa_decrypt(
            self.server.rsaprivatekey, base64.b64decode(b64_encrypted_key))
        have_derived_key = False
        if rest_params["keys"] == "ukey":
            # U fragment from the Tenant, together with auth tag and payload.
            self.server.add_U(decrypted_key)
            self.server.auth_tag = json_body['auth_tag']
            self.server.payload = json_body.get('payload', None)
            have_derived_key = self.server.attempt_decryption(self)
        elif rest_params["keys"] == "vkey":
            # V fragment from the Cloud Verifier.
            self.server.add_V(decrypted_key)
            have_derived_key = self.server.attempt_decryption(self)
        else:
            logger.warning(
                'POST returning response. uri not supported: ' + self.path)
            config.echo_json_response(self, 400, "uri not supported")
            return
        logger.info('POST of %s key returning 200' %
                    (('V', 'U')[rest_params["keys"] == "ukey"]))
        config.echo_json_response(self, 200, "Success")
        # no key yet, then we're done
        if not have_derived_key:
            return
        # woo hoo we have a key
        # ok lets write out the key now
        secdir = secure_mount.mount()  # confirm that storage is still securely mounted
        # clean out the secure dir of any previous info before we extract files
        if os.path.isdir("%s/unzipped" % secdir):
            shutil.rmtree("%s/unzipped" % secdir)
        # write out key file
        f = open(secdir + "/" + self.server.enc_keyname, 'w')
        f.write(base64.b64encode(self.server.K).decode())
        f.close()
        # stow the U value for later
        tpm.write_key_nvram(self.server.final_U)
        # optionally extend a hash of they key and payload into specified PCR
        tomeasure = self.server.K
        # if we have a good key, now attempt to write out the encrypted payload
        dec_path = "%s/%s" % (secdir,
                              config.get('cloud_agent', "dec_payload_file"))
        enc_path = "%s/encrypted_payload" % config.WORK_DIR
        dec_payload = None
        enc_payload = None
        if self.server.payload is not None:
            dec_payload = crypto.decrypt(
                self.server.payload, bytes(self.server.K))
            enc_payload = self.server.payload
        elif os.path.exists(enc_path):
            # if no payload provided, try to decrypt one from a previous run stored in encrypted_payload
            with open(enc_path, 'rb') as f:
                enc_payload = f.read()
            try:
                dec_payload = crypto.decrypt(enc_payload, self.server.K)
                logger.info("Decrypted previous payload in %s to %s" %
                            (enc_path, dec_path))
            except Exception as e:
                # Stale payload that this key cannot decrypt: discard it.
                logger.warning(
                    "Unable to decrypt previous payload %s with derived key: %s" % (enc_path, e))
                os.remove(enc_path)
                enc_payload = None
        # also write out encrypted payload to be decrytped next time
        if enc_payload is not None:
            with open(enc_path, 'wb') as f:
                f.write(self.server.payload.encode('utf-8'))
        # deal with payload
        payload_thread = None
        if dec_payload is not None:
            # Measure key + payload together (may be extended into a PCR below).
            tomeasure = tomeasure + dec_payload
            # see if payload is a zip
            zfio = io.BytesIO(dec_payload)
            if config.getboolean('cloud_agent', 'extract_payload_zip') and zipfile.is_zipfile(zfio):
                logger.info(
                    "Decrypting and unzipping payload to %s/unzipped" % secdir)
                with zipfile.ZipFile(zfio, 'r')as f:
                    f.extractall('%s/unzipped' % secdir)
                # run an included script if one has been provided
                initscript = config.get('cloud_agent', 'payload_script')
                if initscript != "":
                    def initthread():
                        """Run the payload script in the unzipped dir, streaming its output to the log."""
                        import subprocess
                        env = os.environ.copy()
                        env['AGENT_UUID'] = self.server.agent_uuid
                        proc = subprocess.Popen(["/bin/bash", initscript], env=env, shell=False, cwd='%s/unzipped' % secdir,
                                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                        while True:
                            line = proc.stdout.readline()
                            if line == '' and proc.poll() is not None:
                                break
                            if line:
                                logger.debug("init-output: %s" % line.strip())
                        # should be a no-op as poll already told us it's done
                        proc.wait()
                    if not os.path.exists("%s/unzipped/%s" % (secdir, initscript)):
                        logger.info(
                            "No payload script %s found in %s/unzipped" % (initscript, secdir))
                    else:
                        logger.info(
                            "Executing payload script: %s/unzipped/%s" % (secdir, initscript))
                        payload_thread = threading.Thread(target=initthread)
            else:
                logger.info("Decrypting payload to %s" % dec_path)
                with open(dec_path, 'wb') as f:
                    f.write(dec_payload)
            zfio.close()
        # now extend a measurement of the payload and key if there was one
        pcr = config.getint('cloud_agent', 'measure_payload_pcr')
        if pcr > 0 and pcr < 24:
            logger.info("extending measurement of payload into PCR %s" % pcr)
            measured = tpm.hashdigest(tomeasure)
            tpm.extendPCR(pcr, measured)
        # Start the payload script only after the measurement was extended.
        if payload_thread is not None:
            payload_thread.start()
        return
    def get_query_tag_value(self, path, query_tag):
        """This is a utility method to query for specific the http parameters in the uri.
        Returns the value of the parameter, or None if not found."""
        data = {}
        parsed_path = urlparse(self.path)
        query_tokens = parsed_path.query.split('&')
        # find the 'ids' query, there can only be one
        for tok in query_tokens:
            query_tok = tok.split('=')
            query_key = query_tok[0]
            if query_key is not None and query_key == query_tag:
                # ids tag contains a comma delimited list of ids
                data[query_tag] = query_tok[1]
                break
        return data.get(query_tag, None)
    def log_message(self, logformat, *args):
        # Suppress BaseHTTPRequestHandler's default per-request stderr logging.
        return
# consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class CloudAgentHTTPServer(ThreadingMixIn, HTTPServer):
"""Http Server which will handle each request in a separate thread."""
''' Do not modify directly unless you acquire uvLock. Set chosen for uniqueness of contained values'''
u_set = set([])
v_set = set([])
rsaprivatekey = None
rsapublickey = None
rsapublickey_exportable = None
done = threading.Event()
auth_tag = None
payload = None
enc_keyname = None
K = None
final_U = None
agent_uuid = None
def __init__(self, server_address, RequestHandlerClass, agent_uuid):
"""Constructor overridden to provide ability to pass configuration arguments to the server"""
secdir = secure_mount.mount()
keyname = "%s/%s" % (secdir, config.get('cloud_agent', 'rsa_keyname'))
# read or generate the key depending on configuration
if os.path.isfile(keyname):
# read in private key
logger.debug("Using existing key in %s" % keyname)
f = open(keyname, "rb")
rsa_key = crypto.rsa_import_privkey(f.read())
else:
logger.debug("key not found, generating a new one")
rsa_key = crypto.rsa_generate(2048)
with open(keyname, "wb") as f:
f.write(crypto.rsa_export_privkey(rsa_key))
self.rsaprivatekey = rsa_key
self.rsapublickey_exportable = crypto.rsa_export_pubkey(
self.rsaprivatekey)
# attempt to get a U value from the TPM NVRAM
nvram_u = tpm.read_key_nvram()
if nvram_u is not None:
logger.info("Existing U loaded from TPM NVRAM")
self.add_U(nvram_u)
http.server.HTTPServer.__init__(
self, server_address, RequestHandlerClass)
self.enc_keyname = config.get('cloud_agent', 'enc_keyname')
self.agent_uuid = agent_uuid
def add_U(self, u):
"""Threadsafe method for adding a U value received from the Tenant
Do not modify u_set of v_set directly.
"""
with uvLock:
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug("Adding U len %d data:%s" %
(len(u), base64.b64encode(u)))
self.u_set.add(u)
def add_V(self, v):
"""Threadsafe method for adding a U value received from the Cloud Verifier
Do not modify u_set of v_set directly.
"""
with uvLock:
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug(F"Adding V: {base64.b64encode(v)}")
self.v_set.add(v)
def attempt_decryption(self, handler):
"""On reception of a U or V value, this method is called to attempt the decryption of the Cloud Init script
At least one U and V value must be received in order to attempt encryption. Multiple U and V values are stored
to prevent an attacker from sending U/V values to deny service.
"""
with uvLock:
both_u_and_v_present = False
return_value = False
for u in self.u_set:
for v in self.v_set:
both_u_and_v_present = True
return_value = self.decrypt_check(u, v)
if return_value:
# reset u and v sets
self.u_set = set([])
self.v_set = set([])
return return_value
# TODO check on whether this happens or not. NVRAM causes trouble
if both_u_and_v_present:
pass
# logger.critical("Possible attack from: " + str(handler.client_address) + ". Both U (potentially stale from TPM NVRAM) and V present but unsuccessful in attempt to decrypt check value.")
return return_value
    def decrypt_check(self, decrypted_U, decrypted_V):
        """Attempt to derive the bootstrap key K from the passed U and V values.

        This method will access the received auth tag, and may fail if decoy U
        and V values were received. Do not call directly unless you acquire
        uvLock.

        Returns None when no auth tag has arrived yet or the U/V lengths
        differ; True when the derived key's HMAC over the agent UUID matches
        the auth tag (K and final_U are stored on self); False otherwise.
        """
        if self.auth_tag is None:
            return None
        if len(decrypted_U) != len(decrypted_V):
            logger.warning("Invalid U len %d or V len %d. skipping..." %
                           (len(decrypted_U), len(decrypted_V)))
            return None
        # K = U xor V (bitwise split-key reassembly)
        candidate_key = crypto.strbitxor(decrypted_U, decrypted_V)
        # be very careful printing K, U, or V as they leak in logs stored on unprotected disks
        if config.INSECURE_DEBUG:
            logger.debug(F"U: {base64.b64encode(decrypted_U)}")
            logger.debug(F"V: {base64.b64encode(decrypted_V)}")
            logger.debug(F"K: {base64.b64encode(candidate_key)}")
            logger.debug("auth_tag: " + self.auth_tag)
        ex_mac = crypto.do_hmac(candidate_key, self.agent_uuid)
        # NOTE(review): `==` on MAC strings is not constant-time; consider
        # hmac.compare_digest here — confirm threat model before changing.
        if ex_mac == self.auth_tag:
            logger.info("Successfully derived K for UUID %s", self.agent_uuid)
            self.final_U = decrypted_U
            self.K = candidate_key
            return True
        else:
            logger.error("Failed to derive K for UUID %s", self.agent_uuid)
            return False
def main(argv=sys.argv):
    """Entry point for the Keylime cloud agent.

    Initializes the TPM, determines the agent UUID, registers and activates
    the agent with the registrar, then serves the key-share HTTP endpoint
    and (optionally) listens for revocation notifications until Ctrl-C.
    """
    if os.getuid() != 0 and config.REQUIRE_ROOT:
        logger.critical("This process must be run as root.")
        return

    # fail fast if dmidecode-based UUIDs were requested but are unusable
    if config.get('cloud_agent', 'agent_uuid') == 'dmidecode':
        if os.getuid() != 0:
            raise RuntimeError('agent_uuid is configured to use dmidecode, '
                               'but current process is not running as root.')
        cmd = ['which', 'dmidecode']
        ret = cmd_exec.run(cmd, raiseOnError=False)
        if ret['code'] != 0:
            raise RuntimeError('agent_uuid is configured to use dmidecode, '
                               'but it\'s is not found on the system.')

    # get params for initialization
    registrar_ip = config.get('registrar', 'registrar_ip')
    registrar_port = config.get('registrar', 'registrar_port')

    # initialize the tmpfs partition to store keys if it isn't already available
    secdir = secure_mount.mount()

    # change dir to working dir
    config.ch_dir(config.WORK_DIR, logger)

    # initialize tpm
    (ek, ekcert, aik, ek_tpm, aik_name) = tpm.tpm_init(self_activate=False, config_pw=config.get(
        'cloud_agent', 'tpm_ownerpassword'))  # this tells initialize not to self activate the AIK
    virtual_agent = tpm.is_vtpm()

    # try to get some TPM randomness into the system entropy pool
    tpm.init_system_rand()

    if ekcert is None:
        if virtual_agent:
            ekcert = 'virtual'
        elif tpm.is_emulator():
            ekcert = 'emulator'

    # now we need the UUID
    try:
        agent_uuid = config.get('cloud_agent', 'agent_uuid')
    except configparser.NoOptionError:
        agent_uuid = None
    if agent_uuid == 'openstack':
        agent_uuid = openstack.get_openstack_uuid()
    elif agent_uuid == 'hash_ek':
        agent_uuid = hashlib.sha256(ek).hexdigest()
    elif agent_uuid == 'generate' or agent_uuid is None:
        agent_uuid = str(uuid.uuid4())
    elif agent_uuid == 'dmidecode':
        cmd = ['dmidecode', '-s', 'system-uuid']
        ret = cmd_exec.run(cmd)
        sys_uuid = ret['retout'].decode('utf-8')
        agent_uuid = sys_uuid.strip()
    elif agent_uuid == 'hostname':
        agent_uuid = socket.getfqdn()
    if config.STUB_VTPM and config.TPM_CANNED_VALUES is not None:
        # Use canned values for stubbing
        jsonIn = config.TPM_CANNED_VALUES
        if "add_vtpm_to_group" in jsonIn:
            # The value we're looking for has been canned!
            agent_uuid = jsonIn['add_vtpm_to_group']['retout']
        else:
            # Our command hasn't been canned!
            raise Exception("Command %s not found in canned json!" %
                            ("add_vtpm_to_group"))

    logger.info("Agent UUID: %s", agent_uuid)

    # register it and get back a blob
    keyblob = registrar_client.doRegisterAgent(
        registrar_ip, registrar_port, agent_uuid, tpm_version, ek, ekcert, aik, ek_tpm, aik_name)
    if keyblob is None:
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = tpm.activate_identity(keyblob)
    if key is None:
        raise Exception("Activation failed")

    # tell the registrar server we know the key
    retval = False
    if virtual_agent:
        deepquote = tpm.create_deep_quote(
            hashlib.sha1(key).hexdigest(), agent_uuid + aik + ek)
        retval = registrar_client.doActivateVirtualAgent(
            registrar_ip, registrar_port, agent_uuid, deepquote)
    else:
        retval = registrar_client.doActivateAgent(
            registrar_ip, registrar_port, agent_uuid, key)
    if not retval:
        raise Exception("Registration failed on activate")

    serveraddr = (config.get('cloud_agent', 'cloudagent_ip'),
                  config.getint('cloud_agent', 'cloudagent_port'))
    server = CloudAgentHTTPServer(serveraddr, Handler, agent_uuid)
    serverthread = threading.Thread(target=server.serve_forever)
    logger.info(
        f"Starting Cloud Agent on {serveraddr[0]}:{serveraddr[1]} use <Ctrl-C> to stop")
    serverthread.start()

    # want to listen for revocations?
    if config.getboolean('cloud_agent', 'listen_notfications'):
        cert_path = config.get('cloud_agent', 'revocation_cert')
        if cert_path == "default":
            cert_path = '%s/unzipped/RevocationNotifier-cert.crt' % (secdir)
        elif cert_path[0] != '/':
            # if it is a relative, convert to absolute in work_dir
            cert_path = os.path.abspath('%s/%s' % (config.WORK_DIR, cert_path))

        def perform_actions(revocation):
            # runs each configured revocation action module on a notification
            actionlist = []

            # load the actions from inside the keylime module
            actionlisttxt = config.get('cloud_agent', 'revocation_actions')
            if actionlisttxt.strip() != "":
                actionlist = actionlisttxt.split(',')
                actionlist = ["revocation_actions.%s" % i for i in actionlist]

            # load actions from unzipped
            if os.path.exists("%s/unzipped/action_list" % secdir):
                with open("%s/unzipped/action_list" % secdir, 'r') as f:
                    actionlisttxt = f.read()
                    if actionlisttxt.strip() != "":
                        localactions = actionlisttxt.strip().split(',')
                        for action in localactions:
                            if not action.startswith('local_action_'):
                                logger.warning(
                                    "invalid local action: %s. must start with local_action_" % action)
                            else:
                                actionlist.append(action)
                        uzpath = "%s/unzipped" % secdir
                        if uzpath not in sys.path:
                            sys.path.append(uzpath)

            for action in actionlist:
                logger.info("executing revocation action %s" % action)
                try:
                    module = importlib.import_module(action)
                    execute = getattr(module, 'execute')
                    asyncio.get_event_loop().run_until_complete(execute(revocation))
                except Exception as e:
                    # logger.warn is a deprecated alias; use warning
                    logger.warning(
                        "Exception during execution of revocation action %s: %s" % (action, e))
        try:
            while True:
                try:
                    revocation_notifier.await_notifications(
                        perform_actions, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning(
                        "No connection to revocation server, retrying in 10s...")
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            tpm.flush_keys()
            server.shutdown()
    else:
        # no revocation listener: just idle until interrupted
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            tpm.flush_keys()
            server.shutdown()
|
test_cli.py | #!/usr/bin/python
"""
(C) 2018,2019 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import subprocess
import sys
import os
import logging
import optparse # pylint: disable=deprecated-module
import time
import shutil
import tempfile
import re
import random
import json
import binascii
# pylint: disable=global-statement,unused-argument
CLI_PATH = None
TESTS_RUN = 0
TESTS_FAILED = 0
class TestLogHandler(logging.StreamHandler, object):
    """Stream log handler that counts ERROR-level records as test failures."""

    def emit(self, record):
        # Do the default stuff first
        super(TestLogHandler, self).emit(record)
        if record.levelno >= logging.ERROR:
            global TESTS_FAILED
            TESTS_FAILED += 1
def setup_logging(options):
    """Install the failure-counting log handler at the requested verbosity.

    --verbose selects DEBUG, --quiet selects WARNING, otherwise INFO.
    """
    if options.verbose:
        level = logging.DEBUG
    elif options.quiet:
        level = logging.WARNING
    else:
        level = logging.INFO

    handler = TestLogHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(levelname) 7s: %(message)s'))

    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(level)
def random_port_number():
    """Return a random TCP port in the non-privileged range [1024, 65535]."""
    low, high = 1024, 65535
    return random.randint(low, high)
def test_cli(cmd, cmd_options, expected_output=None, cmd_input=None, expected_stderr=None, use_drbg=True):
    """Run the botan CLI with `cmd` and return its stripped stdout.

    cmd_options may be a space-separated string or a list of arguments.
    With use_drbg=True a fixed-seed DRBG is requested so RNG-dependent
    commands are deterministic.  cmd_input, if given, is fed to stdin.
    Mismatches against expected_output/expected_stderr are reported via
    logging.error, which increments the global failure counter.
    """
    global TESTS_RUN

    TESTS_RUN += 1

    opt_list = []

    if isinstance(cmd_options, str):
        opt_list = cmd_options.split(' ')
    elif isinstance(cmd_options, list):
        opt_list = cmd_options

    if use_drbg:
        # fixed seed -> reproducible output for commands that consume randomness
        fixed_drbg_seed = "802" * 32
        drbg_options = ['--rng-type=drbg', '--drbg-seed=' + fixed_drbg_seed]
    else:
        drbg_options = []

    cmdline = [CLI_PATH, cmd] + drbg_options + opt_list

    logging.debug("Executing '%s'" % (' '.join([CLI_PATH, cmd] + opt_list)))

    stdout = None
    stderr = None

    if cmd_input is None:
        proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
    else:
        proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate(cmd_input.encode())

    if stderr:
        if expected_stderr is None:
            logging.error("Got output on stderr %s (stdout was %s)", stderr, stdout)
        else:
            if stderr != expected_stderr:
                logging.error("Got output on stderr %s which did not match expected value %s", stderr, expected_stderr)

    output = stdout.decode('ascii').strip()

    if expected_output is not None:
        if output != expected_output:
            logging.error("Got unexpected output running cmd %s %s", cmd, cmd_options)
            logging.info("Output lengths %d vs expected %d", len(output), len(expected_output))
            logging.info("Got %s", output)
            logging.info("Exp %s", expected_output)

    return output
def check_for_command(cmd):
    """Return True if the CLI binary was built with support for `cmd`."""
    cmdline = [CLI_PATH, 'has_command', cmd]
    proc = subprocess.Popen(cmdline)
    proc.communicate()

    return proc.returncode == 0
def cli_config_tests(_tmp_dir):
    """Sanity-check the install paths and flags reported by `botan config`."""
    prefix = test_cli("config", "prefix")
    cflags = test_cli("config", "cflags")
    ldflags = test_cli("config", "ldflags")
    libs = test_cli("config", "libs")

    # the install prefix must be an absolute path
    if len(prefix) < 4 or prefix[0] != '/':
        logging.error("Bad prefix %s" % (prefix))
    if ("-I%s" % (prefix)) not in cflags:
        logging.error("Bad cflags %s" % (cflags))
    if ("-L%s" % (prefix)) not in ldflags:
        logging.error("Bad ldflags %s" % (ldflags))
    if "-lbotan-2" not in libs:
        logging.error("Bad libs %s" % (libs))
def cli_help_tests(_tmp_dir):
    """`botan help` should print a substantial usage listing."""
    output = test_cli("help", None, None)

    # Maybe test format somehow??
    if len(output) < 500:
        logging.error("Help output seems very short")
def cli_version_tests(_tmp_dir):
    """`botan version` and `version --full` match the expected formats."""
    output = test_cli("version", None, None)

    version_re = re.compile(r'[0-9]\.[0-9]+\.[0-9]')
    if not version_re.match(output):
        logging.error("Unexpected version output %s" % (output))

    output = test_cli("version", ["--full"], None, None)
    version_full_re = re.compile(r'Botan [0-9]\.[0-9]+\.[0-9] \(.* revision .*, distribution .*\)')
    if not version_full_re.match(output):
        logging.error("Unexpected version output %s" % (output))
def cli_is_prime_tests(_tmp_dir):
    """Primality answers for known prime and composite inputs."""
    cases = (
        ("5", "5 is probably prime"),
        ("9", "9 is composite"),
        ("548950623407687320763", "548950623407687320763 is probably prime"),
    )
    for number, verdict in cases:
        test_cli("is_prime", number, verdict)
def cli_gen_prime_tests(_tmp_dir):
    """Prime generation is deterministic under the fixed DRBG seed."""
    cases = (
        ("64", "15568813029901363163"),
        ("128", "287193909494025008847286845478788766073"),
    )
    for bits, expected_prime in cases:
        test_cli("gen_prime", bits, expected_prime)
def cli_entropy_tests(_tmp_dir):
    """Parse `botan entropy all` output: alternating status and sample lines."""
    output = test_cli("entropy", ["all"], None)

    status_re = re.compile('Polling [a-z0-9_]+ gathered [0-9]+ bytes in [0-9]+ outputs with estimated entropy [0-9]+')
    unavail_re = re.compile('Source [a-z0-9_]+ is unavailable')
    comp_re = re.compile('Sample from [a-z0-9_]+ was .* compressed from [0-9]+ bytes to [0-9]+ bytes')
    output_re = re.compile(r'[A-F0-9]+(...)?')

    # output alternates between a status line and a hex sample line
    status_next = True

    for line in output.split('\n'):
        if comp_re.match(line):
            continue

        if status_next:
            if status_re.match(line) is not None:
                status_next = False
            elif unavail_re.match(line) is not None:
                pass
            else:
                logging.error('Unexpected status line %s', line)
                status_next = False
        else:
            if output_re.match(line) is None:
                logging.error('Unexpected sample line %s', line)
            status_next = True
def cli_factor_tests(_tmp_dir):
    """Integer factorization of a prime and two semiprimes."""
    cases = (
        ("97", "97: 97"),
        ("9753893489562389", "9753893489562389: 21433 455087644733"),
        ("12019502040659149507", "12019502040659149507: 3298628633 3643787579"),
    )
    for number, factorization in cases:
        test_cli("factor", number, factorization)
def cli_mod_inverse_tests(_tmp_dir):
    """Modular inverse; 0 is printed when no inverse exists."""
    for args, expected in (("97 802", "339"), ("98 802", "0")):
        test_cli("mod_inverse", args, expected)
def cli_base64_tests(_tmp_dir):
    """Round-trip base64 encoding/decoding over stdin."""
    cases = (
        ("base64_enc", "YmVlcyE=", "bees!"),
        ("base64_dec", "bees!", "YmVlcyE="),
    )
    for command, expected, stdin_data in cases:
        test_cli(command, "-", expected, stdin_data)
def cli_base32_tests(_tmp_dir):
    """Round-trip base32 encoding/decoding over stdin."""
    cases = (
        ("base32_enc", "MJSWK4ZB", "bees!"),
        ("base32_dec", "bees!", "MJSWK4ZB"),
    )
    for command, expected, stdin_data in cases:
        test_cli(command, "-", expected, stdin_data)
def cli_base58_tests(_tmp_dir):
    """Round-trip base58 and base58check encodings over stdin."""
    cases = (
        ("base58_enc", "-", "C6sRAr4", "bees!"),
        ("base58_dec", "-", "bees!", "C6sRAr4"),
        ("base58_enc", ["--check", "-"], "Cjv15cdjaBc", "F00F"),
        ("base58_dec", ["--check", "-"], "F00F", "Cjv15cdjaBc"),
    )
    for command, opts, expected, stdin_data in cases:
        test_cli(command, opts, expected, stdin_data)
def cli_hex_tests(_tmp_dir):
    """Round-trip hex encoding/decoding over stdin."""
    cases = (
        ("hex_enc", "6265657321", "bees!"),
        ("hex_dec", "bees!", "6265657321"),
    )
    for command, expected, stdin_data in cases:
        test_cli(command, "-", expected, stdin_data)
def cli_hash_tests(_tmp_dir):
    """Known-answer hashes of "" and "abc" in hex, base64, and base58 formats."""
    test_cli("hash", "--algo=SHA-256",
             "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 -", "")

    test_cli("hash", "--algo=SHA-256",
             "BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD -", "abc")

    test_cli("hash", ["--algo=SHA-256", "--format=base64"],
             "ungWv48Bz+pBQUDeXa4iI7ADYaOWF3qctBD/YfIAFa0= -", "abc")

    test_cli("hash", ["--algo=SHA-224", "--format=base58", "--no-fsname"],
             "MuGc8HkSVyJjfMjPM5UQikPToBTzNucEghcGLe", "abc")

    test_cli("hash", ["--algo=SHA-224", "--format=base58check", "--no-fsname"],
             "3MmfMqgrhemdVa9bDAGfooukbviWtKMBx2xauL2RsyAe", "abc")
def cli_hmac_tests(tmp_dir):
    """HMAC-SHA-384 of a DRBG-generated key file matches a known value."""
    key_file = os.path.join(tmp_dir, 'hmac.key')

    test_cli("rng", ["64", "--output=%s" % (key_file)], "")

    test_cli("hmac", ["--no-fsname", "--hash=SHA-384", key_file, key_file],
             "E3A8529377030B28A7DBDFC50DDEC8E4ECEFB6EA850D95EB785938CD3E3AFEF9EF8B08AF219C1496633193468AB755CB")
def cli_bcrypt_tests(_tmp_dir):
    """bcrypt hash generation (deterministic via DRBG) and verification."""
    test_cli("gen_bcrypt", "--work-factor=4 s3kr1t",
             "$2a$04$0.8G7o08XYwvBBWA3l0WUujtwoGZgGDzVSN8fNkNqXikcK4A3lHPS")

    test_cli("check_bcrypt", "s3kr1t $2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka",
             "Password is valid")

    test_cli("check_bcrypt", "santa $2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka",
             "Password is NOT valid")
def cli_argon2_tests(_tmp_dir):
    """Argon2id hash generation (defaults and explicit params) and verification."""
    password = "s3kr1t"
    expected = "$argon2id$v=19$m=8,t=1,p=1$2A+I9q2+ZayxDDYC5n2YWw$/Lhx+Jbtlpw+Kxpskfv7+AKhBL/5ebalTJkVC1O5+1E"
    test_cli("gen_argon2", ['--mem=8', password], expected)
    test_cli("gen_argon2", ['--mem=8', '--t=1', password], expected)
    test_cli("gen_argon2", ['--mem=8', '--t=1', '--p=1', password], expected)
    test_cli("check_argon2", [password, expected], "Password is valid")
    test_cli("check_argon2", ["guessing", expected], "Password is NOT valid")
def cli_gen_dl_group_tests(_tmp_dir):
    """Deterministic discrete-log group generation (strong prime and DSA)."""
    pem = """-----BEGIN X9.42 DH PARAMETERS-----
MIIBJAKBgwTw7LQiLkXJsrgMVQxTPlWaQlYz/raZ+5RtIZe4YluQgRQGPFADLZ/t
TOYzuIzZJFOcdKtEtrVkxZRGSkjZwKFKLUD6fzSjoC2M2EHktK/y5HsvxBxL4tKr
q1ffbyPQi+iBLYTZAXygvxj2vWyrvA+/w4nbt1fStCHTDhWjLWqFpV9nAoGDAKzA
HUu/IRl7OiUtW/dz36gzEJnaYtz4ZtJl0FG8RJiOe02lD8myqW2sVzYqMvKD0LGx
x9fdSKC1G+aZ/NWtqrQjb66Daf7b0ddDx+bfWTWJ2dOtZd8IL2rmQQJm+JogDi9i
huVYFicDNQGzi+nEKAzrZ1L/VxtiSiw/qw0IyOuVtz8CFjgPiPatvmWssQw2AuZ9
mFvAZ/8wal0=
-----END X9.42 DH PARAMETERS-----"""

    test_cli("gen_dl_group", "--pbits=1043", pem)

    dsa_grp = """-----BEGIN X9.42 DH PARAMETERS-----
MIIBHgKBgQCyP1vosC/axliM2hmJ9EOSdd1zBkuzMP25CYD8PFkRVrPLr1ClSUtn
eXTIsHToJ7d7sRwtidQGW9BrvUEyiAWE06W/wnLPxB3/g2/l/P2EhbNmNHAO7rV7
ZVz/uKR4Xcvzxg9uk5MpT1VsxA8H6VEwzefNF1Rya92rqGgBTNT3/wKBgC7HLL8A
Gu3tqJxTk1iNgojjOiSreLn6ihA8R8kQnRXDTNtDKz996KHGInfMBurUI1zPM3xq
bHc0CvU1Nf87enhPIretzJcFgiCWrNFUIC25zPEjp0s3/ERHT4Bi1TABZ3j6YUEQ
fnnj+9XriKKHf2WtX0T4FXorvnKq30m934rzAhUAvwhWDK3yZEmphc7dwl4/J3Zp
+MU=
-----END X9.42 DH PARAMETERS-----"""

    test_cli("gen_dl_group", ["--type=dsa", "--pbits=1024"], dsa_grp)
def cli_key_tests(tmp_dir):
    """Exercise keygen, PKCS8 encrypt/decrypt round trips, fingerprints,
    signing/verification, and self-signed certificate issuance."""
    pem = """-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQg2A+I9q2+ZayxDDYC5n2Y
W8Bn/zBm4D3mwS5qMwADRDehRANCAATwnDFqsjXL9SD/Rr1Vy4pb79PswXdQNZBN
mlLtJ5JvZ0/p6zP3x+Y9yPIrAR8L/acG5ItSrAKXzzuqQQZMv4aN
-----END PRIVATE KEY-----"""

    priv_key = os.path.join(tmp_dir, 'priv.pem')
    pub_key = os.path.join(tmp_dir, 'pub.pem')
    pub_der_key = os.path.join(tmp_dir, 'pub.der')
    enc_pem = os.path.join(tmp_dir, 'priv_enc.pem')
    enc_der = os.path.join(tmp_dir, 'priv_enc.der')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    user_cert = os.path.join(tmp_dir, 'user.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp256k1"], pem)

    test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "")

    # PKCS8 export in PEM/DER, plaintext and password-encrypted
    test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "")
    test_cli("pkcs8", "--pub-out --der-out --output=%s %s" % (pub_der_key, priv_key), "")

    test_cli("pkcs8", "--pass-out=foof --der-out --output=%s %s" % (enc_der, priv_key), "")
    test_cli("pkcs8", "--pass-out=foof --output=%s %s" % (enc_pem, priv_key), "")

    # decrypting either encoding must recover the same key
    dec_pem = test_cli("pkcs8", ["--pass-in=foof", enc_pem], None)
    dec_der = test_cli("pkcs8", ["--pass-in=foof", enc_der], None)

    if dec_pem != dec_der:
        logging.error("Problem decrypting PKCS8 key")

    test_cli("fingerprint", ['--no-fsname', pub_key],
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")

    test_cli("fingerprint", ['--no-fsname', pub_der_key],
             "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4")

    valid_sig = "nI4mI1ec14Y7nYUWs2edysAVvkob0TWpmGh5rrYWDA+/W9Fj0ZM21qJw8qa3/avAOIVBO6hoMEVmfJYXlS+ReA=="

    test_cli("sign", "--provider=base %s %s" % (priv_key, pub_key), valid_sig)

    test_cli("verify", [pub_key, pub_key, '-'],
             "Signature is valid", valid_sig)

    test_cli("verify", [pub_key, pub_key, '-'],
             "Signature is invalid",
             valid_sig.replace("G", "H"))

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust")

    cert_info = test_cli("cert_info", ['--fingerprint', ca_cert], None)

    if cert_info.find('Subject: CN="CA",C="VT"') < 0:
        logging.error('Unexpected output for cert_info command %s', cert_info)
    if cert_info.find('Subject keyid: 69DD911C9EEE3400C67CBC3F3056CBE711BD56AF9495013F') < 0:
        logging.error('Unexpected output for cert_info command %s', cert_info)

    test_cli("gen_pkcs10", "%s User --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, user_cert))

    test_cli("cert_verify", [user_cert, ca_cert],
             "Certificate passes validation checks")

    test_cli("cert_verify", user_cert,
             "Certificate did not validate - Certificate issuer not found")
def cli_xmss_sign_tests(tmp_dir):
    """XMSS signing: verify the stateful private key updates after each
    signature while the public key stays fixed."""
    priv_key = os.path.join(tmp_dir, 'priv.pem')
    pub_key = os.path.join(tmp_dir, 'pub.pem')
    pub_key2 = os.path.join(tmp_dir, 'pub2.pem')
    msg = os.path.join(tmp_dir, 'input')
    sig1 = os.path.join(tmp_dir, 'sig1')
    sig2 = os.path.join(tmp_dir, 'sig2')

    test_cli("rng", ['--output=%s' % (msg)], "")
    test_cli("hash", ["--no-fsname", msg], "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855")

    test_cli("keygen", ["--algo=XMSS", "--output=%s" % (priv_key)], "")
    test_cli("hash", ["--no-fsname", priv_key], "32397312E3FAC9D6396C55FEEFFF11EE195E2D2D5B34279D2544AF27763B0946")

    test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "")
    test_cli("fingerprint", ['--no-fsname', pub_key],
             "E2:BE:C8:6D:CF:4B:5D:67:AB:A1:C1:F8:36:79:D5:3B:D8:17:D5:E3:5B:BE:29:08:03:7E:6E:07:27:4E:16:46")

    # verify the key is updated after each signature:
    test_cli("sign", [priv_key, msg, "--output=%s" % (sig1)], "")
    test_cli("verify", [pub_key, msg, sig1], "Signature is valid")
    test_cli("hash", ["--no-fsname", sig1], "04AF45451C7A9AF2D828E1AD6EC262E012436F4087C5DA6F32C689D781E597D0")
    test_cli("hash", ["--no-fsname", priv_key], "649E54D334F78A6AAAE34CFABF62121C74909D80E4DC2FA240A6EE1848526094")

    test_cli("sign", [priv_key, msg, "--output=%s" % (sig2)], "")
    test_cli("verify", [pub_key, msg, sig2], "Signature is valid")
    test_cli("hash", ["--no-fsname", sig2], "0785A6AD54CC7D01F2BE2BC6463A3EAA1159792E52210ED754992C5068E8F24F")
    test_cli("hash", ["--no-fsname", priv_key], "04483FA5367A7340F4BF6160FABD5742258009E05F9584E8D9732660B132608E")

    # private key updates, public key is unchanged:
    test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key2, priv_key), "")
    test_cli("fingerprint", ['--no-fsname', pub_key2],
             "E2:BE:C8:6D:CF:4B:5D:67:AB:A1:C1:F8:36:79:D5:3B:D8:17:D5:E3:5B:BE:29:08:03:7E:6E:07:27:4E:16:46")
def cli_pbkdf_tune_tests(_tmp_dir):
    """Check pbkdf_tune output format for Scrypt, PBKDF2 and Argon2id."""
    if not check_for_command("pbkdf_tune"):
        return

    expected = re.compile(r'For (default|[1-9][0-9]*) ms selected Scrypt\([0-9]+,[0-9]+,[0-9]+\) using [0-9]+ MiB')

    output = test_cli("pbkdf_tune", ["--check", "1", "10", "50", "default"], None).split('\n')

    for line in output:
        if expected.match(line) is None:
            logging.error("Unexpected line '%s'" % (line))

    expected_pbkdf2 = re.compile(r'For (default|[1-9][0-9]*) ms selected PBKDF2\(HMAC\(SHA-256\),[0-9]+\)')

    output = test_cli("pbkdf_tune", ["--algo=PBKDF2(SHA-256)", "--check", "1", "10", "50", "default"], None).split('\n')

    for line in output:
        if expected_pbkdf2.match(line) is None:
            logging.error("Unexpected line '%s'" % (line))

    expected_argon2 = re.compile(r'For (default|[1-9][0-9]*) ms selected Argon2id\([0-9]+,[0-9]+,[0-9]+\)')

    output = test_cli("pbkdf_tune", ["--algo=Argon2id", "--check", "1", "10", "50", "default"], None).split('\n')

    for line in output:
        if expected_argon2.match(line) is None:
            logging.error("Unexpected line '%s'" % (line))
def cli_psk_db_tests(tmp_dir):
    """Store and retrieve PSK entries under two different database keys."""
    if not check_for_command("psk_get"):
        return

    psk_db = os.path.join(tmp_dir, 'psk.db')
    db_key1 = "909"*32
    db_key2 = "451"*32

    test_cli("psk_set", [psk_db, db_key1, "name", "F00FEE"], "")
    test_cli("psk_set", [psk_db, db_key2, "name", "C00FEE11"], "")
    test_cli("psk_set", [psk_db, db_key1, "name2", "50051029"], "")

    test_cli("psk_get", [psk_db, db_key1, "name"], "F00FEE")
    test_cli("psk_get", [psk_db, db_key2, "name"], "C00FEE11")

    # entries are scoped per database key
    test_cli("psk_list", [psk_db, db_key1], "name\nname2")
    test_cli("psk_list", [psk_db, db_key2], "name")
def cli_compress_tests(tmp_dir):
    """Compress a small file, check the gzip magic header, then decompress
    and verify the original content is recovered.

    Fixes: file handles were previously leaked via bare open(...).read()
    calls, and an explicit f.close() was redundantly issued inside a
    `with` block.
    """
    if not check_for_command("compress"):
        return

    input_file = os.path.join(tmp_dir, 'input.txt')
    output_file = os.path.join(tmp_dir, 'input.txt.gz')

    with open(input_file, 'w') as f:
        f.write("hi there")

    test_cli("compress", input_file)

    if not os.access(output_file, os.R_OK):
        logging.error("Compression did not create expected output file")

    is_py3 = sys.version_info[0] == 3

    with open(output_file, 'rb') as f:
        output_hdr = f.read(2)

    # gzip magic is 0x1F 0x8B; indexing bytes yields ints on py3, chars on py2
    if is_py3:
        if output_hdr[0] != 0x1F or output_hdr[1] != 0x8B:
            logging.error("Did not see expected gzip header")
    else:
        if ord(output_hdr[0]) != 0x1F or ord(output_hdr[1]) != 0x8B:
            logging.error("Did not see expected gzip header")

    os.unlink(input_file)

    test_cli("decompress", output_file)

    if not os.access(input_file, os.R_OK):
        logging.error("Decompression did not create expected output file")

    with open(input_file) as f:
        recovered = f.read()

    if recovered != "hi there":
        logging.error("Decompression did not recover original input")
def cli_rng_tests(_tmp_dir):
    """DRBG output is deterministic; real RNG sources must differ from it."""
    test_cli("rng", "10", "D80F88F6ADBE65ACB10C")
    test_cli("rng", "16", "D80F88F6ADBE65ACB10C3602E67D985B")
    test_cli("rng", "10 6", "D80F88F6ADBE65ACB10C\n1B119CC068AF")

    test_cli("rng", ['--format=base64', '10'], "2A+I9q2+ZayxDA==")
    test_cli("rng", ['--format=base58', '10'], "D93XRyVfxqs7oR")
    test_cli("rng", ['--format=base58check', '10'], "2NS1jYUq92TyGFVnhVLa")

    hex_10 = re.compile('[A-F0-9]{20}')

    for rng in ['system', 'auto', 'entropy']:
        output = test_cli("rng", ["10", '--%s' % (rng)], use_drbg=False)
        # a real RNG source should never reproduce the fixed DRBG stream
        if output == "D80F88F6ADBE65ACB10C":
            logging.error('RNG produced DRBG output')
        if hex_10.match(output) is None:
            logging.error('Unexpected RNG output %s' % (output))

    has_rdrand = test_cli("cpuid", []).find(' rdrand ') > 0

    if has_rdrand:
        output = test_cli("rng", ["10", '--rdrand'], use_drbg=False)

        if output == "D80F88F6ADBE65ACB10C":
            logging.error('RDRAND produced DRBG output')
        if hex_10.match(output) is None:
            logging.error('Unexpected RNG output %s' % (output))
def cli_pk_workfactor_tests(_tmp_dir):
    """Estimated security levels for RSA and discrete-log key sizes."""
    test_cli("pk_workfactor", "1024", "80")
    test_cli("pk_workfactor", "2048", "111")

    test_cli("pk_workfactor", ["--type=rsa", "512"], "58")
    test_cli("pk_workfactor", ["--type=dl", "512"], "58")
    test_cli("pk_workfactor", ["--type=dl_exp", "512"], "128")
def cli_dl_group_info_tests(_tmp_dir):
    """dl_group_info prints exactly a P = ... and a G = ... line per group."""
    dl_output = re.compile('(P|G) = [A-F0-9]+')

    for bits in [1024, 1536, 2048, 3072, 4096, 6144, 8192]:
        output = test_cli("dl_group_info", "modp/ietf/%d" % (bits))
        lines = output.split('\n')

        if len(lines) != 2:
            logging.error('Unexpected output from dl_group_info')

        for l in lines:
            if not dl_output.match(l):
                logging.error('Unexpected output from dl_group_info')
def cli_ec_group_info_tests(_tmp_dir):
    """Known-answer output of secp256r1 parameters in text and PEM form."""
    # pylint: disable=line-too-long

    secp256r1_info = """P = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF
A = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC
B = 5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B
N = FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
G = 6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5"""

    secp256r1_pem = """-----BEGIN EC PARAMETERS-----
MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP//////////
/////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12Ko6
k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDydwN9
gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1AiEA
/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE=
-----END EC PARAMETERS-----"""

    test_cli("ec_group_info", "secp256r1", secp256r1_info)
    test_cli("ec_group_info", "--pem secp256r1", secp256r1_pem)
def cli_cpuid_tests(_tmp_dir):
    """`botan cpuid` prints a 'CPUID flags:' prefix and well-formed flag names."""
    cpuid_output = test_cli("cpuid", [])

    if not cpuid_output.startswith('CPUID flags: '):
        logging.error('Unexpected cpuid output %s' % (cpuid_output))

    flag_re = re.compile('[a-z0-9_]+')
    flags = cpuid_output[13:].split(' ')
    for flag in flags:
        if flag_re.match(flag) is None:
            logging.error('Unexpected CPUID flag name %s' % (flag))
def cli_cc_enc_tests(_tmp_dir):
    """Format-preserving encryption of credit-card numbers (Luhn-adjusted)."""
    cases = (
        ("cc_encrypt", "8028028028028029", "4308989841607208"),
        ("cc_decrypt", "4308989841607208", "8028028028028027"),
    )
    for command, number, expected in cases:
        test_cli(command, [number, "pass"], expected)
def cli_cert_issuance_tests(tmp_dir):
    """Issue a root -> intermediate -> leaf certificate chain and verify it.

    Fixes a missing comma in the final cert_verify call: the original
    `test_cli("cert_verify" "%s %s %s" % ...)` concatenated the adjacent
    string literals, so a bogus command name was executed and the expected
    output string was silently consumed as cmd_options.
    """
    root_key = os.path.join(tmp_dir, 'root.key')
    root_crt = os.path.join(tmp_dir, 'root.crt')
    int_key = os.path.join(tmp_dir, 'int.key')
    int_crt = os.path.join(tmp_dir, 'int.crt')
    int_csr = os.path.join(tmp_dir, 'int.csr')
    leaf_key = os.path.join(tmp_dir, 'leaf.key')
    leaf_crt = os.path.join(tmp_dir, 'leaf.crt')
    leaf_csr = os.path.join(tmp_dir, 'leaf.csr')

    test_cli("keygen", ["--params=2048", "--output=" + root_key], "")
    test_cli("keygen", ["--params=2048", "--output=" + int_key], "")
    test_cli("keygen", ["--params=2048", "--output=" + leaf_key], "")

    # self-signed root with a path length allowing two subordinate levels
    test_cli("gen_self_signed",
             [root_key, "Root", "--ca", "--path-limit=2", "--output="+root_crt], "")

    test_cli("gen_pkcs10", "%s Intermediate --ca --output=%s" % (int_key, int_csr))
    test_cli("sign_cert", "%s %s %s --output=%s" % (root_crt, root_key, int_csr, int_crt))

    test_cli("gen_pkcs10", "%s Leaf --output=%s" % (leaf_key, leaf_csr))
    test_cli("sign_cert", "%s %s %s --output=%s" % (int_crt, int_key, leaf_csr, leaf_crt))

    test_cli("cert_verify", "%s %s %s" % (leaf_crt, int_crt, root_crt),
             "Certificate passes validation checks")
def cli_timing_test_tests(_tmp_dir):
    """Each timing-attack test suite emits semicolon-separated numeric rows."""
    timing_tests = ["bleichenbacher", "manger",
                    "ecdsa", "ecc_mul", "inverse_mod", "pow_mod",
                    "lucky13sec3", "lucky13sec4sha1",
                    "lucky13sec4sha256", "lucky13sec4sha384"]

    output_re = re.compile('[0-9]+;[0-9];[0-9]+')

    for suite in timing_tests:
        output = test_cli("timing_test", [suite, "--measurement-runs=16", "--warmup-runs=3"], None).split('\n')

        for line in output:
            if output_re.match(line) is None:
                logging.error("Unexpected output in timing_test %s: %s", suite, line)
def cli_tls_ciphersuite_tests(_tmp_dir):
    """List ciphersuites for each policy/version pair; names must be well-formed."""
    policies = ['default', 'suiteb_128', 'suiteb_192', 'strict', 'all']

    versions = ['tls1.0', 'tls1.1', 'tls1.2']

    ciphersuite_re = re.compile('^[A-Z0-9_]+$')

    for policy in policies:
        for version in versions:
            # only the 'all' policy enables pre-1.2 suites
            if version != 'tls1.2' and policy != 'all':
                continue

            output = test_cli("tls_ciphers", ["--version=" + version, "--policy=" + policy], None).split('\n')

            for line in output:
                if ciphersuite_re.match(line) is None:
                    logging.error("Unexpected ciphersuite line %s", line)
def cli_asn1_tests(_tmp_dir):
    """Known-answer ASN.1 pretty-printing of a PEM blob read from stdin."""
    input_pem = """-----BEGIN BLOB-----
MCACAQUTBnN0cmluZzEGAQH/AgFjBAUAAAAAAAMEAP///w==
-----END BLOB------
"""

    expected = """d= 0, l= 32: SEQUENCE
d= 1, l= 1: INTEGER 05
d= 1, l= 6: PRINTABLE STRING string
d= 1, l= 6: SET
d= 2, l= 1: BOOLEAN true
d= 2, l= 1: INTEGER 63
d= 1, l= 5: OCTET STRING 0000000000
d= 1, l= 4: BIT STRING FFFFFF"""

    test_cli("asn1print", "--pem -", expected, input_pem)
def cli_tls_socket_tests(tmp_dir):
    """Run tls_server and tls_client against each other on localhost and
    confirm the handshake completes and the client message is echoed."""
    client_msg = b'Client message %d\n' % (random.randint(0, 2**128))
    server_port = random_port_number()

    priv_key = os.path.join(tmp_dir, 'priv.pem')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    server_cert = os.path.join(tmp_dir, 'server.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "")

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust")

    test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))

    tls_server = subprocess.Popen([CLI_PATH, 'tls_server', '--max-clients=1',
                                   '--port=%d' % (server_port), server_cert, priv_key],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # give the server a moment to bind before connecting
    time.sleep(.5)

    tls_client = subprocess.Popen([CLI_PATH, 'tls_client', 'localhost',
                                   '--port=%d' % (server_port), '--trusted-cas=%s' % (ca_cert)],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    time.sleep(.5)

    tls_client.stdin.write(client_msg)
    tls_client.stdin.flush()

    time.sleep(.5)

    (stdout, stderr) = tls_client.communicate()

    if stderr:
        logging.error("Got unexpected stderr output %s" % (stderr))

    if b'Handshake complete' not in stdout:
        logging.error('Failed to complete handshake: %s' % (stdout))

    if client_msg not in stdout:
        logging.error("Missing client message from stdout %s" % (stdout))

    tls_server.communicate()
def cli_tls_http_server_tests(tmp_dir):
    """Run tls_http_server and exercise it with an HTTPS GET and POST."""
    if not check_for_command("tls_http_server"):
        return

    # py2/py3 compatible import of HTTPSConnection; skip if unavailable
    try:
        from http.client import HTTPSConnection
    except ImportError:
        try:
            from httplib import HTTPSConnection
        except ImportError:
            return
    import ssl

    server_port = random_port_number()

    priv_key = os.path.join(tmp_dir, 'priv.pem')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    server_cert = os.path.join(tmp_dir, 'server.crt')

    test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "")

    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")

    test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))

    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))

    tls_server = subprocess.Popen([CLI_PATH, 'tls_http_server', '--max-clients=2',
                                   '--port=%d' % (server_port), server_cert, priv_key],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # give the server a moment to bind before connecting
    time.sleep(.5)

    context = ssl.create_default_context(cafile=ca_cert)
    conn = HTTPSConnection('localhost', port=server_port, context=context)
    conn.request("GET", "/")
    resp = conn.getresponse()

    if resp.status != 200:
        logging.error('Unexpected response status %d' % (resp.status))

    body = str(resp.read())

    if body.find('TLS negotiation with Botan 2.') < 0:
        logging.error('Unexpected response body')

    conn.request("POST", "/logout")
    resp = conn.getresponse()

    if resp.status != 405:
        logging.error('Unexpected response status %d' % (resp.status))

    if sys.version_info.major >= 3:
        rc = tls_server.wait(5)  # pylint: disable=too-many-function-args
    else:
        rc = tls_server.wait()
    if rc != 0:
        logging.error("Unexpected return code from https_server %d", rc)
def cli_tls_proxy_tests(tmp_dir):
    # pylint: disable=too-many-locals,too-many-statements
    """Exercise `botan tls_proxy`: run a plain HTTP backend in a thread,
    put the proxy in front of it with a freshly generated cert, and check
    that HTTPS requests through the proxy return the backend's response."""
    if not check_for_command("tls_proxy"):
        return

    # HTTPS client and HTTP server, with Python 2/3 compatible imports.
    try:
        from http.client import HTTPSConnection
    except ImportError:
        try:
            from httplib import HTTPSConnection
        except ImportError:
            return
    try:
        from http.server import HTTPServer, BaseHTTPRequestHandler
    except ImportError:
        try:
            from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
        except ImportError:
            return

    import ssl
    import threading

    server_port = random_port_number()
    proxy_port = random_port_number()
    # The two listeners must be on distinct ports; re-roll on collision.
    while server_port == proxy_port:
        proxy_port = random_port_number()

    priv_key = os.path.join(tmp_dir, 'priv.pem')
    ca_cert = os.path.join(tmp_dir, 'ca.crt')
    crt_req = os.path.join(tmp_dir, 'crt.req')
    server_cert = os.path.join(tmp_dir, 'server.crt')

    # Build a throwaway PKI: self-signed CA, then a localhost server cert.
    test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "")
    test_cli("gen_self_signed",
             [priv_key, "CA", "--ca", "--country=VT",
              "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert],
             "")
    test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req))
    test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert))

    # --max-clients=2 makes the proxy exit after the two requests below.
    tls_proxy = subprocess.Popen([CLI_PATH, 'tls_proxy', str(proxy_port), '127.0.0.1', str(server_port),
                                  server_cert, priv_key, '--output=/tmp/proxy.err', '--max-clients=2'],
                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Give the proxy process time to bind its socket.
    time.sleep(.5)

    # Random payload so we can verify the proxy forwarded it unmodified.
    server_response = binascii.hexlify(os.urandom(32))

    def run_http_server():
        """Serve `server_response` for every GET, forever (daemon thread)."""
        class Handler(BaseHTTPRequestHandler):

            def do_GET(self): # pylint: disable=invalid-name
                self.send_response(200)
                self.end_headers()
                self.wfile.write(server_response)

        httpd = HTTPServer(('', server_port), Handler)
        httpd.serve_forever()

    http_thread = threading.Thread(target=run_http_server)
    http_thread.daemon = True
    http_thread.start()

    # Give the backend HTTP server time to start as well.
    time.sleep(.5)

    context = ssl.create_default_context(cafile=ca_cert)

    for _i in range(2):
        conn = HTTPSConnection('localhost', port=proxy_port, context=context)
        conn.request("GET", "/")
        resp = conn.getresponse()

        if resp.status != 200:
            logging.error('Unexpected response status %d' % (resp.status))

        body = resp.read()

        if body != server_response:
            logging.error('Unexpected response from server %s' % (body))

    # Popen.wait only accepts a timeout on Python 3.
    if sys.version_info.major >= 3:
        rc = tls_proxy.wait(5) # pylint: disable=too-many-function-args
    else:
        rc = tls_proxy.wait()

    if rc != 0:
        logging.error('Unexpected return code %d', rc)
def cli_trust_root_tests(tmp_dir):
    """Test exporting system trust roots, both as DN lines and as PEM blobs.

    Checks that every line of the --dn-only output looks like a
    comma-separated list of quoted name=value components.
    """
    pem_file = os.path.join(tmp_dir, 'pems')
    dn_file = os.path.join(tmp_dir, 'dns')

    test_cli("trust_roots", ['--dn-only', '--output=%s' % (dn_file)], "")

    dn_re = re.compile('(.+=\".+\")(,.+=\".+\")')

    # Fix: the original iterated over a bare open() and never closed the
    # file handle; use a context manager for deterministic cleanup.
    with open(dn_file) as dn_lines:
        for line in dn_lines:
            if dn_re.match(line) is None:
                logging.error("Unexpected DN line %s", line)

    test_cli("trust_roots", ['--output=%s' % (pem_file)], "")
def cli_tss_tests(tmp_dir):
    """Split a random 32-byte secret into 5 shares (threshold 3), then
    verify recovery succeeds with 5/4/3 shares and fails with only 2."""
    data_file = os.path.join(tmp_dir, 'data')
    exp_hash = "53B3C59276AE30EA7FD882268E80FD96AD80CC9FEB15F9FB940E7C4B5CF80B9E"

    test_cli("rng", ["32", "--output=%s" % (data_file)], "")
    test_cli("hash", ["--no-fsname", data_file], exp_hash)

    threshold = 3
    share_count = 5

    test_cli("tss_split", [str(threshold), str(share_count), data_file,
                           "--share-prefix=%s/split" % (tmp_dir)], "")

    share_files = []
    for idx in range(1, share_count + 1):
        share = os.path.join(tmp_dir, "split%d.tss" % (idx))
        if not os.access(share, os.R_OK):
            logging.error("Failed to create expected split file %s", share)
        share_files.append(share)

    # Any subset of at least `threshold` shares must reconstruct the secret.
    for available in (5, 4, 3):
        recovered = os.path.join(tmp_dir, "recovered_%d" % (available))
        test_cli("tss_recover",
                 share_files[share_count - available:] + ["--output=%s" % (recovered)], "")
        test_cli("hash", ["--no-fsname", recovered], exp_hash)

    # Below the threshold, reconstruction must report an error.
    rec2 = os.path.join(tmp_dir, "recovered_2")
    test_cli("tss_recover", share_files[3:] + ["--output=%s" % (rec2)], "", None,
             b'Error: Insufficient shares to do TSS reconstruction\n')
def cli_pk_encrypt_tests(tmp_dir):
    """RSA encrypt/decrypt round trip; outputs are deterministic because the
    CLI is driven by a fixed DRBG, so all artifacts hash to known values."""
    def in_tmp(name):
        return os.path.join(tmp_dir, name)

    input_file = in_tmp('input')
    ctext_file = in_tmp('ctext')
    recovered_file = in_tmp('recovered')
    rsa_priv_key = in_tmp('rsa.priv')
    rsa_pub_key = in_tmp('rsa.pub')

    key_hash = "891A3AA179639796B7A6348D2F1C3A8CC7E0FFED38BAE29143DF9B8A55391F28"
    rng_output_hash = "32F5E7B61357DE8397EFDA1E598379DFD5EE21767BDF4E2A435F05117B836AC6"
    ctext_hash = "5F45F360CF431C3E1BC126B1DB20CFE7A869AE7B67484A64F426A6349245EB51"

    # Deterministic keygen: the private key hashes to a fixed value.
    test_cli("keygen", ["--algo=RSA", "--provider=base", "--params=2048",
                        "--output=%s" % (rsa_priv_key)], "")
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", rsa_priv_key], key_hash)
    test_cli("pkcs8", ["--pub-out", "%s/rsa.priv" % (tmp_dir),
                       "--output=%s" % (rsa_pub_key)], "")

    # Generate a random input file and confirm its (fixed-DRBG) contents.
    test_cli("rng", ["10", "16", "32", "--output=%s" % (input_file)], "")
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", input_file], rng_output_hash)

    # Encrypt (ciphertext is reproducible), then decrypt and compare.
    test_cli("pk_encrypt", [rsa_pub_key, input_file, "--output=%s" % (ctext_file)], "")
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", ctext_file], ctext_hash)
    test_cli("pk_decrypt", [rsa_priv_key, ctext_file, "--output=%s" % (recovered_file)], "")
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", recovered_file], rng_output_hash)
def cli_uuid_tests(_tmp_dir):
    """Check `botan uuid`: deterministic under a fixed DRBG seed, and the
    default output is a well-formed uppercase UUID."""
    fixed_drbg_seed = "802" * 32
    test_cli("uuid", ['--rng-type=drbg', '--drbg-seed=' + fixed_drbg_seed],
             "D80F88F6-ADBE-45AC-B10C-3602E67D985B")

    output = test_cli("uuid", [])
    uuid_re = re.compile(r'[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}')
    if uuid_re.match(output) is None:
        logging.error('Bad uuid output %s' % (output))
def cli_tls_client_hello_tests(_tmp_dir):
    """Parse a canned TLS client hello; pin the formatted output via SHA-256."""
    # pylint: disable=line-too-long
    chello = "16030100cf010000cb03035b3cf2457b864d7bef2a4b1f84fc3ced2b68d9551f3455ffdd305af277a91bb200003a16b816b716ba16b9cca9cca8c02cc030c02bc02fc0adc0acc024c00ac028c014c023c009c027c013ccaa009f009ec09fc09e006b003900670033010000680000000e000c000009676d61696c2e636f6d000500050100000000000a001a0018001d0017001a0018001b0019001c01000101010201030104000b00020100000d00140012080508040806050106010401050306030403001600000017000000230000ff01000100"

    parsed = test_cli("tls_client_hello", ["--hex", "-"], None, chello)

    expected_hash = "8EBFC3205ACFA98461128FE5D081D19254237AF84F7DAF000A3C992C3CF6DE44"
    test_cli("hash", ["--no-fsname", "--algo=SHA-256", "-"], expected_hash, parsed)
def cli_speed_tests(_tmp_dir):
    # pylint: disable=too-many-branches
    """Smoke-test `botan speed` output formats.

    Only the *shape* of each output line is validated (via regexes), not the
    timing numbers. --msec=1 keeps the whole run fast. The repeated
    run-then-match-every-line pattern of the original is factored into two
    local helpers.
    """
    msec = 1

    def get_speed_output(args):
        """Run `speed --msec=<msec> <args>` and return its output lines."""
        return test_cli("speed", ["--msec=%d" % (msec)] + args, None).split('\n')

    def check_lines(lines, *patterns):
        """Every line must match at least one of the compiled patterns."""
        for line in lines:
            if not any(p.match(line) is not None for p in patterns):
                logging.error("Unexpected line %s", line)

    # pylint: disable=line-too-long
    output = get_speed_output(["--buf-size=64,512", "AES-128"])
    if len(output) % 4 != 0:
        logging.error("Unexpected number of lines for AES-128 speed test")
    check_lines(output,
                re.compile(r'^AES-128 .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)'))

    check_lines(get_speed_output(["ChaCha20", "SHA-256", "HMAC(SHA-256)"]),
                re.compile(r'^.* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)'))

    # AES-128/GCM emits both key-schedule lines and cipher throughput lines.
    check_lines(get_speed_output(["AES-128/GCM"]),
                re.compile(r'^AES-128/GCM\(16\) .* [0-9]+ key schedule/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+ ms\)'),
                re.compile(r'^AES-128/GCM\(16\) .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)'))

    pk_algos = ["ECDSA", "ECDH", "SM2", "ECKCDSA", "ECGDSA", "GOST-34.10",
                "DH", "DSA", "ElGamal", "Ed25519", "Curve25519", "NEWHOPE", "McEliece",
                "RSA", "XMSS"]
    # e.g. ECDSA-secp256r1 106 keygen/sec; 9.35 ms/op 37489733 cycles/op (1 op in 9 ms)
    check_lines(get_speed_output(pk_algos),
                re.compile(r'^.* [0-9]+ ([A-Za-z ]+)/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+ ms\)'))

    # these all have a common output format
    math_ops = ['mp_mul', 'mp_div', 'mp_div10', 'modexp', 'random_prime', 'inverse_mod',
                'rfc3394', 'fpe_fe1', 'ecdsa_recovery', 'ecc_init', 'poly_dbl',
                'bn_redc', 'nistp_redc', 'ecc_mult', 'ecc_ops', 'os2ecp', 'primality_test',
                'bcrypt', 'passhash9', 'argon2']
    math_re = re.compile(r'^.* [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+(\.[0-9]+)? ms\)')
    for op in math_ops:
        check_lines(get_speed_output([op]), math_re)

    check_lines(get_speed_output(["scrypt"]),
                re.compile(r'^scrypt-[0-9]+-[0-9]+-[0-9]+ \([0-9]+ MiB\) [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+ ms\)'))

    # e.g. ChaCha_RNG generate buffer size 1024 bytes: 954.431 MiB/sec (477.22 MiB in 500.00 ms)
    check_lines(get_speed_output(["RNG"]),
                re.compile(r'^.* generate buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms'))

    # e.g. Entropy source rdseed output 128 bytes estimated entropy 0 in 0.02168 ms total samples 32
    check_lines(get_speed_output(["entropy"]),
                re.compile(r'^Entropy source [_a-z]+ output [0-9]+ bytes estimated entropy [0-9]+ in [0-9]+\.[0-9]+ ms .*total samples [0-9]+'))

    # JSON mode: each record must carry the full set of expected fields.
    output = test_cli("speed", ["--msec=%d" % (msec), "--format=json", "AES-128"], None)
    json_blob = json.loads(output)
    if len(json_blob) < 2:
        logging.error("Unexpected size for JSON output")

    for b in json_blob:
        for field in ['algo', 'op', 'events', 'bps', 'buf_size', 'nanos']:
            if field not in b:
                logging.error('Missing field %s in JSON record %s' % (field, b))
def main(args=None):
    """Entry point: parse options, then run each cli_*_tests function.

    Usage: cli_tests.py path_to_botan_cli [test_regex]

    Returns a process exit code: 0 on success, 1 on usage/regex error or
    any recorded test failure, 2 if the CLI binary is not executable.
    """
    if args is None:
        args = sys.argv

    parser = optparse.OptionParser(
        formatter=optparse.IndentedHelpFormatter(max_help_position=50))

    parser.add_option('--verbose', action='store_true', default=False)
    parser.add_option('--quiet', action='store_true', default=False)

    (options, args) = parser.parse_args(args)

    setup_logging(options)

    if len(args) < 2:
        logging.error("Usage: ./cli_tests.py path_to_botan_cli [test_regex]")
        return 1

    if not os.access(args[1], os.X_OK):
        logging.error("Could not access/execute %s", args[1])
        return 2

    # The CLI binary path is shared with every test helper via this global.
    global CLI_PATH
    CLI_PATH = args[1]

    test_regex = None
    if len(args) == 3:
        # Optional second positional arg restricts which test fns run.
        try:
            test_regex = re.compile(args[2])
        except re.error as e:
            logging.error("Invalid regex: %s", str(e))
            return 1

    start_time = time.time()

    test_fns = [
        cli_argon2_tests,
        cli_asn1_tests,
        cli_base32_tests,
        cli_base58_tests,
        cli_base64_tests,
        cli_bcrypt_tests,
        cli_cc_enc_tests,
        cli_cert_issuance_tests,
        cli_compress_tests,
        cli_config_tests,
        cli_cpuid_tests,
        cli_dl_group_info_tests,
        cli_ec_group_info_tests,
        cli_entropy_tests,
        cli_factor_tests,
        cli_gen_dl_group_tests,
        cli_gen_prime_tests,
        cli_hash_tests,
        cli_help_tests,
        cli_hex_tests,
        cli_hmac_tests,
        cli_is_prime_tests,
        cli_key_tests,
        cli_xmss_sign_tests,
        cli_mod_inverse_tests,
        cli_pbkdf_tune_tests,
        cli_pk_encrypt_tests,
        cli_pk_workfactor_tests,
        cli_psk_db_tests,
        cli_rng_tests,
        cli_speed_tests,
        cli_timing_test_tests,
        cli_tls_ciphersuite_tests,
        cli_tls_client_hello_tests,
        cli_tls_http_server_tests,
        cli_tls_proxy_tests,
        cli_tls_socket_tests,
        cli_trust_root_tests,
        cli_tss_tests,
        cli_uuid_tests,
        cli_version_tests,
        ]

    for fn in test_fns:
        fn_name = fn.__name__

        if test_regex is not None:
            if test_regex.search(fn_name) is None:
                continue

        logging.info("Running %s" % (fn_name))

        start = time.time()
        # Each test gets its own scratch directory, removed afterwards.
        tmp_dir = tempfile.mkdtemp(prefix='botan_cli_')
        try:
            fn(tmp_dir)
        except Exception as e: # pylint: disable=broad-except
            # A raised exception is logged but does not stop the other tests.
            logging.error("Test %s threw exception: %s", fn_name, e)
        shutil.rmtree(tmp_dir)
        end = time.time()
        logging.debug("Ran %s in %.02f sec", fn_name, end-start)

    end_time = time.time()

    print("Ran %d tests with %d failures in %.02f seconds" % (
        TESTS_RUN, TESTS_FAILED, end_time - start_time))

    if TESTS_FAILED > 0:
        return 1

    return 0

if __name__ == '__main__':
    sys.exit(main())
|
hot_reload.py | #!/usr/bin/env python
# coding:utf-8
from flask import Flask, request
import threading
import time
global_value = "voila"

def worker(delay=5):
    """Background thread body: after `delay` seconds, rebind the
    module-level `global_value` so later requests observe the new value."""
    global global_value
    time.sleep(delay)
    global_value = "HAHAHA"

# Fixes two bugs in the original:
#  * Thread(..., args=(global_value)) passed the *string* "voila" as the
#    args sequence (missing comma), so the thread crashed trying to unpack
#    five characters into one parameter. args must be a tuple, or omitted.
#  * Assigning to a same-named parameter only rebound a local inside
#    worker(); the module global was never updated. Declaring `global`
#    makes the intended update actually happen.
t = threading.Thread(target=worker)
t.daemon = True  # attribute form; setDaemon() is deprecated
t.start()
app = Flask(__name__)


@app.route('/')
def hello_world():
    # Serve whatever the module-level global_value currently holds.
    return global_value


@app.route('/user/<username>')
def show_user_profile(username):
    # Echo back the requested user name.
    return 'User %s' % username


@app.route('/post/<int:post_id>')
def show_post(post_id):
    # Echo back the requested (integer) post id.
    return 'Post %d' % post_id


@app.route('/login', methods=['GET', 'POST'])
def login():
    # Report which HTTP method was used to reach /login.
    return "login POST" if request.method == 'POST' else "login GET"
if __name__ == "__main__":
    # Run the Flask development server on localhost only. With debug=False
    # the auto-reloader is disabled, so edits to this file are NOT picked
    # up while the server runs.
    app.run(
        debug=False,
        host="127.0.0.1",
        port=5000
    )
    # app.run() blocks; this line is only reached after the server exits
    # (e.g. Ctrl-C shutdown).
    print(app)
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  for device in device_lib.list_local_devices():
    if device.device_type in ("GPU", "SYCL"):
      return compat.as_str(device.name)
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  graph_def = graph.as_graph_def()
  actual_ops = {}
  for node in graph_def.node:
    if node.name not in expected_ops:
      continue
    if expected_ops[node.name] != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_ops[node.name], node.op))
    actual_ops[node.name] = node
  if set(expected_ops.keys()) != set(actual_ops.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), actual_ops.keys()))
  return actual_ops
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs.  Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
        values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  if not isinstance(actual, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
  if not isinstance(expected, graph_pb2.GraphDef):
    raise TypeError(
        "Expected tf.GraphDef for expected, got %s" % type(expected).__name__)

  if checkpoint_v2:
    # Drop randomized V2 checkpoint attrs so they don't cause spurious diffs.
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)

  difference = pywrap_tensorflow.EqualGraphDefWrapper(
      actual.SerializeToString(), expected.SerializeToString())
  if difference:
    raise AssertionError(compat.as_str(difference))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  NOTE: this function mutates `a` and `b` — compared fields are cleared as
  they are checked so the final whole-proto comparison only covers the
  remainder.
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      # Typed collections: decode each serialized entry and compare as protos.
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # Untyped collections: compare raw values directly.
      tester.assertEquals(a_value, b_value)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")

  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")

  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
  """Thin wrapper over the native pywrap_tensorflow.IsGoogleCudaEnabled()."""
  return pywrap_tensorflow.IsGoogleCudaEnabled()


def CudaSupportsHalfMatMulAndConv():
  """Thin wrapper over pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()."""
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()


def IsMklEnabled():
  """Thin wrapper over the native pywrap_tensorflow.IsMklEnabled()."""
  return pywrap_tensorflow.IsMklEnabled()


def InstallStackTraceHandler():
  """Installs the native stack trace handler (no return value).

  NOTE: the Python name says "StackTrace" but the native symbol is
  spelled "StacktraceHandler".
  """
  pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # Maps tensor rank -> permutation moving the channel axis to position 1.
  axis_orders = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}

  if not isinstance(input_tensor, ops.Tensor):
    # Shape array: permute the entries directly.
    order = axis_orders[len(input_tensor)]
    return [input_tensor[axis] for axis in order]

  return array_ops.transpose(input_tensor,
                             axis_orders[input_tensor.shape.ndims])
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards.

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
        divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    temp_shape = input_shape_or_tensor.shape.as_list()
  else:
    temp_shape = input_shape_or_tensor
  if temp_shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # Split the channel dimension C into (C // 4, 4), then permute.
  temp_shape[-1] //= 4
  temp_shape.append(4)
  permutation = permutations[len(temp_shape)]
  if not is_tensor:
    return [temp_shape[axis] for axis in permutation]
  reshaped = array_ops.reshape(input_shape_or_tensor, temp_shape)
  return array_ops.transpose(reshaped, permutation)
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which should
  be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    input_shape = input_shape_or_tensor.shape.as_list()
  else:
    input_shape = input_shape_or_tensor
  if input_shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  permutation = permutations[len(input_shape)]
  # Fold the trailing vector dimension of 4 back into the channel dim.
  nhwc_shape = [input_shape[axis] for axis in permutation[:-1]]
  nhwc_shape[-1] *= input_shape[-1]
  if not is_tensor:
    return nhwc_shape
  transposed = array_ops.transpose(input_shape_or_tensor, permutation)
  return array_ops.reshape(transposed, nhwc_shape)
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # Maps tensor rank -> permutation moving the channel axis to the end.
  axis_orders = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}

  if not isinstance(input_tensor, ops.Tensor):
    # Shape array: permute the entries directly.
    order = axis_orders[len(input_tensor)]
    return [input_tensor[axis] for axis in order]

  return array_ops.transpose(input_tensor,
                             axis_orders[input_tensor.shape.ndims])
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
  """Runs `fn` with ops._USE_C_API temporarily set to `use_c_api`.

  The previous value is always restored (finally block), and the default
  graph is reset both before and after so it reflects the active setting.
  """
  prev_value = ops._USE_C_API
  ops._USE_C_API = use_c_api
  try:
    # Reset the default graph so it has the C API enabled. We call
    # reset_default_graph() instead of creating a new default Graph context to
    # make this robust to tests that call reset_default_graph(), which requires
    # that the current default graph isn't nested.
    ops.reset_default_graph()
    fn(*args, **kwargs)
  finally:
    ops._USE_C_API = prev_value
    # Make sure default graph reflects prev_value in case next test doesn't call
    # reset_default_graph().
    ops.reset_default_graph()


# pylint: disable=protected-access


def c_api_and_cuda_enabled():
  """True iff the C API flag is set and the Google CUDA build is enabled."""
  return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """

  def real_skip_if(fn):

    def wrapper(*args, **kwargs):
      """Runs fn unless the skip condition holds; returns fn's result."""
      # Callables are evaluated lazily, at call time, so the skip decision
      # can depend on state that only exists when the test actually runs.
      if callable(condition):
        skip = condition()
      else:
        skip = condition
      if not skip:
        # Bug fix: propagate fn's return value instead of discarding it.
        return fn(*args, **kwargs)
      return None

    return wrapper

  return real_skip_if
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
  """Decorator for disabling the C API on a test.

  Note this disables the C API after running the test class's setup/teardown
  methods.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Delegates the flag flipping / graph resetting to _use_c_api_wrapper.
    _use_c_api_wrapper(fn, False, *args, **kwargs)

  return wrapper


# TODO(skyewm): remove this eventually
def enable_c_api(fn):
  """Decorator for enabling the C API on a test.

  Note this enables the C API after running the test class's setup/teardown
  methods.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Delegates the flag flipping / graph resetting to _use_c_api_wrapper.
    _use_c_api_wrapper(fn, True, *args, **kwargs)

  return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
  """Adds methods that call original methods but with C API enabled.

  Note this enables the C API in new methods after running the test class's
  setup method. This can be a problem if some objects are created in it
  before the C API is enabled.

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # If the C API is already enabled, don't do anything. Some tests break if
  # the same test is run twice, so this allows us to turn on the C API by
  # default without breaking these tests.
  if ops._USE_C_API:
    return cls

  # Snapshot the dict before mutating the class with setattr.
  for name, value in list(cls.__dict__.items()):
    if name.startswith("test") and callable(value):
      setattr(cls, name + "WithCApi", enable_c_api(value))
  return cls
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.

  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.

  Clears the caches that it knows about, runs the garbage collector, then checks
  that there are no Tensor or Tensor-like objects still around. This includes
  Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).

  Args:
    f: The test case to run.
  Returns:
    The decorated test case.
  """

  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensor(obj):
      try:
        return (isinstance(obj, ops.Tensor) or
                isinstance(obj, variables.Variable))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False

    # Track pre-existing tensors by id() so only new survivors are reported.
    tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
    outside_graph_key = ops.get_default_graph()._graph_key
    with ops.Graph().as_default():
      # Run the test in a new graph so that collections get cleared when it's
      # done, but inherit the graph key so optimizers behave.
      ops.get_default_graph()._graph_key = outside_graph_key
      f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    backprop._zeros_cache.flush()
    context.get_default_context().ones_rank_cache().flush()
    context.get_default_context().scalar_cache().clear()
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensor(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))

  return decorator
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.

  Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
  cannot be un-set (i.e. will disable garbage collection for any other unit
  tests in the same file/shard).

  Args:
    f: The function to decorate.

  Returns:
    The decorated function.
  """

  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    # DEBUG_SAVEALL diverts everything the collector would free into
    # gc.garbage, so new entries after the test indicate reference cycles.
    gc.disable()
    previous_debug_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    f(self, **kwargs)
    gc.collect()
    if len(gc.garbage) > previous_garbage:
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        # Printing arbitrary leaked objects can itself raise; keep each
        # object's diagnostics best-effort.
        try:
          logging.error(
              "Object %d of %d" % (i, len(gc.garbage) - previous_garbage))

          def _safe_object_str(obj):
            return "<%s %d>" % (obj.__class__.__name__, id(obj))

          logging.error("  Object type: %s" % (_safe_object_str(obj),))
          logging.error("  Referrer types: %s" % (
              ', '.join([_safe_object_str(ref)
                         for ref in gc.get_referrers(obj)]),))
          logging.error("  Referent types: %s" % (
              ', '.join([_safe_object_str(ref)
                         for ref in gc.get_referents(obj)]),))
          logging.error("  Object attribute names: %s" % (dir(obj),))
          logging.error("  Object __str__:")
          logging.error(obj)
          logging.error("  Object __repr__:")
          logging.error(repr(obj))
        except Exception:
          logging.error("(Exception while printing object)")
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, len(gc.garbage))
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()

  return decorator
def run_in_graph_and_eager_modes(__unused__=None,
                                 graph=None,
                                 config=None,
                                 use_gpu=False,
                                 force_gpu=False,
                                 reset_test=True,
                                 assert_no_eager_garbage=False):
  """Runs the test in both graph and eager modes.

  Args:
    __unused__: Prevents sliently skipping tests.
    graph: Optional graph to use during the returned session.
    config: An optional config_pb2.ConfigProto to use to configure the
      session.
    use_gpu: If True, attempt to run as many ops as possible on GPU.
    force_gpu: If True, pin all ops to `/device:GPU:0`.
    reset_test: If True, tearDown and SetUp the test case again.
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test in eager mode. This will fail if there are reference cycles
      (e.g. a = []; a.append(a)). Off by default because some tests may create
      garbage for legitimate reasons (e.g. they define a class which inherits
      from `object`), and because DEBUG_SAVEALL is sticky in some Python
      interpreters (meaning that tests which rely on objects being collected
      elsewhere in the unit test file will not work). Additionally, checks that
      nothing still has a reference to Tensors that the test allocated.
  Returns:
    Returns a decorator that will run the decorated test function
        using both a graph and using eager execution.
  """

  assert not __unused__, "Add () after run_in_graph_and_eager_modes."

  def decorator(f):
    """Test method decorator."""

    def decorated(self, **kwargs):
      """Decorated the test method."""
      with context.graph_mode():
        with self.test_session(graph, config, use_gpu, force_gpu):
          f(self, **kwargs)

      if reset_test:
        # This decorator runs the wrapped test twice.
        # Reset the test environment between runs.
        self.tearDown()
        self._tempdir = None
        self.setUp()

      def run_eager_mode(self, **kwargs):
        if force_gpu:
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with context.device(gpu_name):
            # Bug fix: forward **kwargs here as the other branches do;
            # they were previously dropped on the force_gpu path.
            f(self, **kwargs)
        elif use_gpu:
          # TODO(xpan): Support softplacement and gpu by default when available.
          f(self, **kwargs)
        else:
          with context.device("/device:CPU:0"):
            f(self, **kwargs)

      if assert_no_eager_garbage:
        run_eager_mode = assert_no_new_tensors(
            assert_no_garbage_created(run_eager_mode))

      with context.eager_mode():
        with ops.Graph().as_default():
          run_eager_mode(self, **kwargs)

    return decorated

  return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
    """Returns whether TensorFlow can access a GPU.

    Args:
      cuda_only: limit the search to CUDA gpus.
      min_cuda_compute_capability: a (major, minor) pair that indicates the
        minimum CUDA compute capability required, or None if no requirement.

    Returns:
      True iff a gpu device of the requested kind is available.
    """

    def _capability_of(device_desc):
        """Extracts the (major, minor) compute capability from a description."""
        # TODO(jingyue): The device description generator has to be in sync
        # with this file. Another option is to put compute capability in
        # DeviceAttributes, but I avoided that to keep DeviceAttributes
        # target-independent. Reconsider this option when we have more things
        # like this to keep in sync.
        # LINT.IfChange
        found = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
        # LINT.ThenChange(//tensorflow/core/\
        # common_runtime/gpu/gpu_device.cc)
        if found is None:
            return 0, 0
        return int(found.group(1)), int(found.group(2))

    for dev in device_lib.list_local_devices():
        if dev.device_type == "GPU":
            no_minimum = min_cuda_compute_capability is None
            if no_minimum or (_capability_of(dev.physical_device_desc) >=
                              min_cuda_compute_capability):
                return True
        elif dev.device_type == "SYCL" and not cuda_only:
            return True
    return False
@contextlib.contextmanager
def device(use_gpu):
    """Uses gpu when requested and available."""
    if use_gpu and is_gpu_available():
        target = "/device:GPU:0"
    else:
        target = "/device:CPU:0"
    with ops.device(target):
        yield
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
    """Base class for tests that need to test TensorFlow.
    """

    def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
        """Initializes per-test bookkeeping state."""
        super(TensorFlowTestCase, self).__init__(methodName)
        # Threads created via checkedThread(); checked in tearDown().
        self._threads = []
        # Lazily created by get_temp_dir(); unique per test run.
        self._tempdir = None
        # Session reused across tests by test_session() when graph is None.
        self._cached_session = None
def setUp(self):
    """Resets session, graph and random-seed state before each test."""
    self._ClearCachedSession()
    # Seed all random sources with the fixed default graph seed so tests are
    # deterministic.
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
    """Verifies every checked thread terminated and drops the cached session."""
    for checked_thread in self._threads:
        checked_thread.check_termination()
    self._ClearCachedSession()
def _ClearCachedSession(self):
    """Closes and forgets the session cached by test_session(), if any."""
    cached = self._cached_session
    if cached is not None:
        cached.close()
        self._cached_session = None
def get_temp_dir(self):
    """Returns a unique temporary directory for the test to use.

    Repeated calls within a single test return the same folder, but different
    runs get different folders, so tests cannot pollute each other's
    environment. If you need multiple unique directories within a single test,
    use tempfile.mkdtemp(dir=self.get_temp_dir()).

    Returns:
      string, the path to the unique temporary directory created for this
      test.
    """
    if not self._tempdir:
        # Created lazily, under the framework's temp root.
        self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
    return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
    """Asserts that a and b are the same proto.

    ProtoEq() is consulted first because it returns correct results for
    floating point attributes; assertProtoEqual() is only invoked on mismatch,
    for its good error messages.

    Args:
      a: a proto.
      b: another proto.
      msg: Optional message to report on failure.
    """
    if compare.ProtoEq(a, b):
        return
    compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
    """Asserts that message is same as parsed expected_message_ascii.

    Creates another prototype of message, reads the ascii message into it and
    then compares them using self._AssertProtoEquals().

    Args:
      expected_message_maybe_ascii: proto message in original or ascii form.
      message: the message to validate.
      msg: Optional message to report on failure.
    """
    msg = msg if msg else ""
    if isinstance(expected_message_maybe_ascii, type(message)):
        expected_message = expected_message_maybe_ascii
        # BUG FIX: previously dropped `msg` here, losing the caller's context
        # on failure; forward it like the str branch does.
        self._AssertProtoEquals(expected_message, message, msg=msg)
    elif isinstance(expected_message_maybe_ascii, str):
        expected_message = type(message)()
        text_format.Merge(
            expected_message_maybe_ascii,
            expected_message,
            descriptor_pool=descriptor_pool.Default())
        self._AssertProtoEquals(expected_message, message, msg=msg)
    else:
        assert False, ("Can't compare protos of type %s and %s. %s" %
                       (type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
        self,
        expected,
        actual,
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
        msg=None):
    """Like assertProtoEquals, but prepends a `versions` stanza to expected."""
    header = "versions { producer: %d min_consumer: %d };\n" % (producer,
                                                                min_consumer)
    self.assertProtoEquals(header + expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
    """Assert that actual.startswith(expected_start) is True.

    Args:
      actual: str
      expected_start: str
      msg: Optional message to report on failure.
    """
    if actual.startswith(expected_start):
        return
    failure = "%r does not start with %r" % (actual, expected_start)
    if msg:
        failure += " : %r" % (msg)
    self.fail(failure)
def _eval_tensor(self, tensor):
    """Converts one eager tensor / resource variable / callable to numpy."""
    if tensor is None:
        return None
    if isinstance(tensor, ops.EagerTensor):
        return tensor.numpy()
    if isinstance(tensor, resource_variable_ops.ResourceVariable):
        return tensor.read_value().numpy()
    if callable(tensor):
        # Call it and evaluate whatever structure it produces.
        return self._eval_helper(tensor())
    raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
    """Maps _eval_tensor over an arbitrarily nested structure of tensors."""
    if tensors is None:
        return None
    return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
    """Evaluates tensors and returns numpy values.

    Args:
      tensors: A Tensor or a nested list/tuple of Tensors.

    Returns:
      tensors numpy values.
    """
    if context.executing_eagerly():
        return self._eval_helper(tensors)
    sess = ops.get_default_session()
    if sess is not None:
        return sess.run(tensors)
    # No default session: fall back to the test's (possibly cached) session.
    with self.test_session() as sess:
        return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
                 graph=None,
                 config=None,
                 use_gpu=False,
                 force_gpu=False):
    """Returns a TensorFlow Session for use in executing tests.

    This method should be used for all functional tests.

    This method behaves different than session.Session: for performance
    reasons `test_session` will by default (if `graph` is None) reuse the same
    session across tests. This means you may want to either call the function
    `reset_default_graph()` before tests, or if creating an explicit new
    graph, pass it here (simply setting it with `as_default()` won't do it),
    which will trigger the creation of a new session.

    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned
    to the CPU.

    Example:
    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        with self.test_session(use_gpu=True):
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
    ```

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      use_gpu: If True, attempt to run as many ops as possible on GPU.
      force_gpu: If True, pin all ops to `/device:GPU:0`.

    Returns:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    if self.id().endswith(".test_session"):
        self.skipTest("Not a test.")

    def prepare_config(config):
        """Returns a config for sessions.

        Args:
          config: An optional config_pb2.ConfigProto to use to configure the
            session.

        Returns:
          A config_pb2.ConfigProto object.
        """
        if config is None:
            config = config_pb2.ConfigProto()
            config.allow_soft_placement = not force_gpu
            config.gpu_options.per_process_gpu_memory_fraction = 0.3
        elif force_gpu and config.allow_soft_placement:
            # BUG FIX: the original wrote
            #   config = config_pb2.ConfigProto().CopyFrom(config)
            # but protobuf's Message.CopyFrom() returns None, so `config`
            # became None and the attribute assignments below crashed.
            # Copy into a fresh proto and keep the copy.
            original = config
            config = config_pb2.ConfigProto()
            config.CopyFrom(original)
            config.allow_soft_placement = False
        # Don't perform optimizations for tests so we don't inadvertently run
        # gpu ops on cpu
        config.graph_options.optimizer_options.opt_level = -1
        config.graph_options.rewrite_options.constant_folding = (
            rewriter_config_pb2.RewriterConfig.OFF)
        config.graph_options.rewrite_options.arithmetic_optimization = (
            rewriter_config_pb2.RewriterConfig.OFF)
        return config

    if graph is None:
        # Reuse (or lazily create) the session cached on this test case.
        if self._cached_session is None:
            self._cached_session = session.Session(
                graph=None, config=prepare_config(config))
        sess = self._cached_session
        with sess.graph.as_default(), sess.as_default():
            if force_gpu:
                # Use the name of an actual device if one is detected, or
                # '/device:GPU:0' otherwise
                gpu_name = gpu_device_name()
                if not gpu_name:
                    gpu_name = "/device:GPU:0"
                with sess.graph.device(gpu_name):
                    yield sess
            elif use_gpu:
                yield sess
            else:
                with sess.graph.device("/cpu:0"):
                    yield sess
    else:
        # Explicit graph: always create a fresh, non-cached session.
        with session.Session(graph=graph, config=prepare_config(config)) as sess:
            if force_gpu:
                # Use the name of an actual device if one is detected, or
                # '/device:GPU:0' otherwise
                gpu_name = gpu_device_name()
                if not gpu_name:
                    gpu_name = "/device:GPU:0"
                with sess.graph.device(gpu_name):
                    yield sess
            elif use_gpu:
                yield sess
            else:
                with sess.graph.device("/cpu:0"):
                    yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
    """A wrapper class for Thread that asserts successful completion.

    This class should be created using the TensorFlowTestCase.checkedThread()
    method.
    """

    def __init__(self, testcase, target, args=None, kwargs=None):
        """Constructs a new instance of _CheckedThread.

        Args:
          testcase: The TensorFlowTestCase for which this thread is being
            created.
          target: A callable object representing the code to be executed in
            the thread.
          args: A tuple of positional arguments that will be passed to target.
          kwargs: A dictionary of keyword arguments that will be passed to
            target.
        """
        self._testcase = testcase
        self._target = target
        self._args = () if args is None else args
        self._kwargs = {} if kwargs is None else kwargs
        # The real thread runs _protected_run so any exception can be captured
        # and re-reported from join() on the main thread.
        self._thread = threading.Thread(target=self._protected_run)
        self._exception = None
        self._is_thread_joined = False

    def _protected_run(self):
        """Target for the wrapper thread. Sets self._exception on failure."""
        try:
            self._target(*self._args, **self._kwargs)
        except Exception as e:  # pylint: disable=broad-except
            self._exception = e

    def start(self):
        """Starts the thread's activity.

        This must be called at most once per _CheckedThread object. It
        arranges for the object's target to be invoked in a separate thread of
        control.
        """
        self._thread.start()

    def join(self):
        """Blocks until the thread terminates.

        Raises:
          self._testcase.failureException: If the thread terminates due to an
            exception.
        """
        self._is_thread_joined = True
        self._thread.join()
        if self._exception is not None:
            self._testcase.fail("Error in checkedThread: %s" % str(self._exception))

    def is_alive(self):
        """Returns whether the thread is alive.

        This method returns True just before the run() method starts
        until just after the run() method terminates.

        Returns:
          True if the thread is alive, otherwise False.
        """
        return self._thread.is_alive()

    def check_termination(self):
        """Returns whether the checked thread was properly used and did terminate.

        Every checked thread should be "join"ed after starting, and before the
        test tears down. If it is not joined, it is possible the thread will
        hang and cause flaky failures in tests.

        Raises:
          self._testcase.failureException: If check_termination was called
            before thread was joined.
          RuntimeError: If the thread is not terminated. This means thread was
            not joined with the main thread.
        """
        if self._is_thread_joined:
            if self.is_alive():
                raise RuntimeError(
                    "Thread was not joined with main thread, and is still running "
                    "when the test finished.")
        else:
            self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
    """Returns a Thread wrapper that asserts 'target' completes successfully.

    This method should be used to create all threads in test cases, as
    otherwise there is a risk that a thread will silently fail, and/or
    assertions made in the thread will not be respected.

    Args:
      target: A callable object to be executed in the thread.
      args: The argument tuple for the target invocation. Defaults to ().
      kwargs: A dictionary of keyword arguments for the target invocation.
        Defaults to {}.

    Returns:
      A wrapper for threading.Thread that supports start() and join() methods.
    """
    wrapper = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
    # Track it so tearDown() can verify the thread was joined.
    self._threads.append(wrapper)
    return wrapper
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
    """Asserts that two floats are near each other.

    Checks that |f1 - f2| <= err and asserts a test failure if not.

    Args:
      f1: A float value.
      f2: A float value.
      err: A float value.
      msg: An optional string message to append to the failure message.
    """
    # f1 == f2 is needed here as we might have: f1, f2 = inf, inf
    suffix = " (%s)" % msg if msg is not None else ""
    self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
                    "%f != %f +/- %f%s" % (f1, f2, err, suffix))
def assertArrayNear(self, farray1, farray2, err, msg=None):
    """Asserts that two float arrays are elementwise near each other.

    Checks that |f1 - f2| < err for all corresponding elements of farray1 and
    farray2. Asserts a test failure if not.

    Args:
      farray1: a list of float values.
      farray2: a list of float values.
      err: a float value.
      msg: Optional message to report on failure.
    """
    self.assertEqual(len(farray1), len(farray2), msg=msg)
    for lhs, rhs in zip(farray1, farray2):
        self.assertNear(float(lhs), float(rhs), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
    """True iff the norm of (ndarray1 - ndarray2) is strictly below err."""
    return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
    """Asserts that two numpy arrays have near values.

    Args:
      ndarray1: a numpy ndarray.
      ndarray2: a numpy ndarray.
      err: a float. The maximum absolute difference allowed.
      msg: Optional message to report on failure.
    """
    near = self._NDArrayNear(ndarray1, ndarray2, err)
    self.assertTrue(near, msg=msg)
def _GetNdArray(self, a):
    """Coerces `a` to a numpy ndarray if it is not one already."""
    return a if isinstance(a, np.ndarray) else np.array(a)
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts two array-likes have identical shape and elementwise-close values.

    On mismatch, prints the violating element positions/values before
    delegating the actual failure to np.testing.assert_allclose.

    Args:
      a: the expected value; anything convertible to a numpy ndarray.
      b: the actual value; anything convertible to a numpy ndarray.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
                     (a.shape, b.shape))
    if not np.allclose(a, b, rtol=rtol, atol=atol):
        # Prints more details than np.testing.assert_allclose.
        #
        # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
        # checks whether two arrays are element-wise equal within a
        # tolerance. The relative difference (rtol * abs(b)) and the
        # absolute difference atol are added together to compare against
        # the absolute difference between a and b. Here, we want to
        # print out which elements violate such conditions.
        cond = np.logical_or(
            np.abs(a - b) > atol + rtol * np.abs(b),
            np.isnan(a) != np.isnan(b))
        if a.ndim:
            x = a[np.where(cond)]
            y = b[np.where(cond)]
            print("not close where = ", np.where(cond))
        else:
            # np.where is broken for scalars
            x, y = a, b
        print("not close lhs = ", x)
        print("not close rhs = ", y)
        print("not close dif = ", np.abs(x - y))
        print("not close tol = ", atol + rtol * np.abs(y))
        print("dtype = %s, shape = %s" % (a.dtype, a.shape))
        # TODO(xpan): There seems to be a bug:
        # tensorflow/compiler/tests:binary_ops_test pass with float32
        # nan even though the equal_nan is False by default internally.
        np.testing.assert_allclose(
            a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
                             a,
                             b,
                             rtol=1e-6,
                             atol=1e-6,
                             path=None,
                             msg=None):
    """Recursively compares nested structures of array-likes for closeness.

    Dicts (and namedtuples, via _asdict) are compared key-by-key, lists and
    tuples element-by-element (or directly as ndarrays when possible), and
    leaves via _assertArrayLikeAllClose. `path` accumulates the keys/indices
    visited so failure messages can point at the offending element.

    Args:
      a: the expected structure.
      b: the actual structure.
      rtol: relative tolerance.
      atol: absolute tolerance.
      path: internal; list of keys/indices from the root to this element.
      msg: Optional message to report on failure.
    """
    path = path or []
    path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
    msg = msg if msg else ""
    # Check if a and/or b are namedtuples.
    if hasattr(a, "_asdict"):
        a = a._asdict()
    if hasattr(b, "_asdict"):
        b = b._asdict()
    a_is_dict = isinstance(a, dict)
    if a_is_dict != isinstance(b, dict):
        raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
                         (path_str, path_str, msg))
    if a_is_dict:
        self.assertItemsEqual(
            a.keys(),
            b.keys(),
            msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
            (path_str, a.keys(), path_str, b.keys(), msg))
        for k in a:
            # path is mutated in place and restored after each recursion.
            path.append(k)
            self._assertAllCloseRecursive(
                a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
            del path[-1]
    elif isinstance(a, (list, tuple)):
        # Try to directly compare a, b as ndarrays; if not work, then traverse
        # through the sequence, which is more expensive.
        try:
            a_as_ndarray = np.array(a)
            b_as_ndarray = np.array(b)
            self._assertArrayLikeAllClose(
                a_as_ndarray,
                b_as_ndarray,
                rtol=rtol,
                atol=atol,
                msg="Mismatched value: a%s is different from b%s. %s" %
                (path_str, path_str, msg))
        except (ValueError, TypeError) as e:
            if len(a) != len(b):
                raise ValueError(
                    "Mismatched length: a%s has %d items, but b%s has %d items. %s" %
                    (path_str, len(a), path_str, len(b), msg))
            for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
                path.append(str(idx))
                self._assertAllCloseRecursive(
                    a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
                del path[-1]
    # a and b are ndarray like objects
    else:
        try:
            self._assertArrayLikeAllClose(
                a,
                b,
                rtol=rtol,
                atol=atol,
                msg="Mismatched value: a%s is different from b%s." % (path_str,
                                                                      path_str))
        except TypeError as e:
            # Augment the TypeError with the path and operand types before
            # re-raising, so the caller can locate the bad element.
            msg = "Error: a%s has %s, but b%s has %s" % (
                path_str, type(a), path_str, type(b))
            e.args = ((e.args[0] + ' : ' + msg,) + e.args[1:])
            raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts that two structures of numpy arrays, have near values.

    `a` and `b` can be arbitrarily nested structures. A layer of a nested
    structure can be a `dict`, `namedtuple`, `tuple` or `list`.

    Args:
      a: The expected numpy `ndarray`, or anything that can be converted into
        a numpy `ndarray`, or any arbitrarily nested of structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray`, or any arbitrarily nested of structure of these.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.

    Raises:
      ValueError: if only one of `a[p]` and `b[p]` is a dict or
        `a[p]` and `b[p]` have different length, where `[p]` denotes a path
        to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
        `[p] = [1]['d']`, then `a[p] = (6, 7)`.
    """
    # All the real work happens in the recursive helper.
    self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllCloseAccordingToType(self,
                                  a,
                                  b,
                                  rtol=1e-6,
                                  atol=1e-6,
                                  float_rtol=1e-6,
                                  float_atol=1e-6,
                                  half_rtol=1e-3,
                                  half_atol=1e-3,
                                  bfloat16_rtol=1e-2,
                                  bfloat16_atol=1e-2,
                                  msg=None):
    """Like assertAllClose, but also suitable for comparing fp16 arrays.

    In particular, the tolerance is reduced to 1e-3 if at least
    one of the arguments is of type float16.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      rtol: relative tolerance.
      atol: absolute tolerance.
      float_rtol: relative tolerance for float32.
      float_atol: absolute tolerance for float32.
      half_rtol: relative tolerance for float16.
      half_atol: absolute tolerance for float16.
      bfloat16_rtol: relative tolerance for bfloat16.
      bfloat16_atol: absolute tolerance for bfloat16.
      msg: Optional message to report on failure.
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # types with lower tol are put later to overwrite previous ones.
    if (a.dtype == np.float32 or b.dtype == np.float32 or
            a.dtype == np.complex64 or b.dtype == np.complex64):
        rtol = max(rtol, float_rtol)
        atol = max(atol, float_atol)
    if a.dtype == np.float16 or b.dtype == np.float16:
        rtol = max(rtol, half_rtol)
        atol = max(atol, half_atol)
    if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
            b.dtype == dtypes.bfloat16.as_numpy_dtype):
        rtol = max(rtol, bfloat16_rtol)
        atol = max(atol, bfloat16_atol)
    self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertAllEqual(self, a, b, msg=None):
    """Asserts that two numpy arrays have the same values.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      msg: Optional message to report on failure.
    """
    msg = msg if msg else ""
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
                     " %s" % (a.shape, b.shape, msg))
    same = (a == b)
    # For float dtypes, treat NaN == NaN as equal (plain == is False there).
    if a.dtype == np.float32 or a.dtype == np.float64:
        same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
    if not np.all(same):
        # Prints more details than np.testing.assert_array_equal.
        diff = np.logical_not(same)
        if a.ndim:
            x = a[np.where(diff)]
            y = b[np.where(diff)]
            print("not equal where = ", np.where(diff))
        else:
            # np.where is broken for scalars
            x, y = a, b
        print("not equal lhs = ", x)
        print("not equal rhs = ", y)
        np.testing.assert_array_equal(a, b, err_msg=msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
                                   expected_err_re_or_predicate):
    """Returns a context manager to enclose code expected to raise an exception.

    If the exception is an OpError, the op stack is also included in the
    message predicate search.

    Args:
      exception_type: The expected type of exception that should be raised.
      expected_err_re_or_predicate: If this is callable, it should be a
        function of one argument that inspects the passed-in exception and
        returns True (success) or False (please fail the test). Otherwise,
        the error message is expected to match this regular expression
        partially.

    Returns:
      A context manager to surround code that is expected to raise an
      exception.
    """
    if callable(expected_err_re_or_predicate):
        predicate = expected_err_re_or_predicate
    else:

        def predicate(e):
            # For OpErrors, append the originating op chain so the regex can
            # also match against op names.
            err_str = e.message if isinstance(e, errors.OpError) else str(e)
            op = e.op if isinstance(e, errors.OpError) else None
            while op is not None:
                err_str += "\nCaused by: " + op.name
                op = op._original_op  # pylint: disable=protected-access
            logging.info("Searching within error strings: '%s' within '%s'",
                         expected_err_re_or_predicate, err_str)
            return re.search(expected_err_re_or_predicate, err_str)

    try:
        yield
        self.fail(exception_type.__name__ + " not raised")
    except Exception as e:  # pylint: disable=broad-except
        if not isinstance(e, exception_type) or not predicate(e):
            raise AssertionError("Exception of type %s: %s" % (str(type(e)),
                                                               str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
    """Shorthand for assertRaisesWithPredicateMatch on errors.OpError."""
    return self.assertRaisesWithPredicateMatch(
        errors.OpError, expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
    """Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.

    Args:
      np_array: A Numpy ndarray or Numpy scalar.
      tf_tensor: A Tensor.
      msg: Optional message to report on failure.

    Raises:
      TypeError: If the arguments have the wrong type.
    """
    if not isinstance(np_array, (np.ndarray, np.generic)):
        raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
    if not isinstance(tf_tensor, ops.Tensor):
        raise TypeError("tf_tensor must be a Tensor")
    tensor_shape = tf_tensor.get_shape().as_list()
    self.assertAllEqual(np_array.shape, tensor_shape, msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
    """Asserts that the two given devices are the same.

    Args:
      device1: A string device name or TensorFlow `DeviceSpec` object.
      device2: A string device name or TensorFlow `DeviceSpec` object.
      msg: Optional message to report on failure.
    """
    device1 = pydev.canonical_name(device1)
    device2 = pydev.canonical_name(device2)
    # BUG FIX: when msg was None the failure message literally ended with
    # "not equal. None"; only append the caller's message when one was given.
    self.assertEqual(device1, device2,
                     "Devices %s and %s are not equal. %s" %
                     (device1, device2, msg if msg else ""))
# Fix Python 3 compatibility issues
if six.PY3:
    # pylint: disable=invalid-name

    # Silence a deprecation warning: assertRaisesRegexp was renamed to
    # assertRaisesRegex in Python 3.
    assertRaisesRegexp = googletest.TestCase.assertRaisesRegex

    # assertItemsEqual is assertCountEqual as of 3.2.
    assertItemsEqual = googletest.TestCase.assertCountEqual

    # pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
                         num_ps,
                         protocol="grpc",
                         worker_config=None,
                         ps_config=None):
    """Create and start local servers and return the associated `Server` objects.

    Example:
    ```python
    workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)

    worker_sessions = [tf.Session(w.target) for w in workers]

    with tf.device("/job:ps/task:0"):
      ...
    with tf.device("/job:ps/task:1"):
      ...
    with tf.device("/job:worker/task:0"):
      ...
    with tf.device("/job:worker/task:1"):
      ...

    worker_sessions[0].run(...)
    ```

    Args:
      num_workers: Number of worker servers to start.
      num_ps: Number of PS servers to start.
      protocol: Communication protocol. Allowed values are documented in
        the documentation of `tf.train.Server`.
      worker_config: (optional) ConfigProto to initialize workers. Can be used
        to instantiate multiple devices etc.
      ps_config: (optional) ConfigProto to initialize PS servers.

    Returns:
      A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
      of `num_workers` objects of type `tf.train.Server` (all running
      locally); and `ps_servers` is a list of `num_ps` objects of similar
      type.

    Raises:
      ImportError: if portpicker module was not found at load time
    """
    if _portpicker_import_error:
        raise _portpicker_import_error  # pylint: disable=raising-bad-type

    worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
    cluster = server_lib.ClusterSpec({
        "worker": ["localhost:%s" % port for port in worker_ports],
        "ps": ["localhost:%s" % port for port in ps_ports],
    })

    def _start_server(job_name, task_index, job_config):
        # Each server is started immediately and runs in-process.
        return server_lib.Server(
            cluster,
            job_name=job_name,
            protocol=protocol,
            task_index=task_index,
            config=job_config,
            start=True)

    workers = [
        _start_server("worker", ix, worker_config) for ix in range(num_workers)
    ]
    ps_servers = [_start_server("ps", ix, ps_config) for ix in range(num_ps)]
    return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
    """Returns the `NodeDef` instance for given node name in the graph def.

    This method explores only the NodeDefs in `graph_def.node`.

    Args:
      node_name: Name of the NodeDef to search for.
      graph_def: An instance of `GraphDef` proto.

    Returns:
      the `NodeDef` instance whose name field matches the given node_name or
      None.
    """
    return next(
        (node_def for node_def in graph_def.node if node_def.name == node_name),
        None)
def set_producer_version(graph, producer_version):
    """Sets graph.graph_def_versions.producer to `producer_version`."""
    # The C API doesn't expose altering GraphDefVersions. We can indirectly set
    # it via import_graph_def though.
    graph_def = graph_pb2.GraphDef()
    graph_def.versions.producer = producer_version
    with graph.as_default():
        importer.import_graph_def(graph_def)
    # BUG FIX: the original read `assert x, y` — the comma made y the assert
    # *message*, so only truthiness of the producer was checked. The intent is
    # an equality check against the requested version.
    assert graph.graph_def_versions.producer == producer_version
|
arcus_util.py | #
# arcus-python-client - Arcus python client driver
# Copyright 2014 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import telnetlib, os, sys
import socket
import threading
from kazoo.client import KazooClient
import kazoo
from kazoo.exceptions import *
class arcus_cache:
    """In-memory view of one Arcus cache cloud (service code) in ZooKeeper."""

    def __init__(self, zk_addr, code):
        self.code = code
        self.zk_addr = zk_addr
        self.node = []         # every node registered under this code
        self.active_node = []  # nodes currently alive
        self.dead_node = []    # nodes registered but not alive
        self.meta = ['', None]

    def __repr__(self):
        summary = '[Service Code: %s] (zk:%s)\n (node) %s\n (active) %s\n (dead) %s' % (
            self.code, self.zk_addr, self.node, self.active_node,
            self.dead_node)
        return summary
class arcus_node:
    """A single Arcus memcached node, plus a telnet helper for admin commands."""

    def __init__(self, ip, port):
        self.ip = ip
        self.port = port
        self.name = ''       # host name from cache_list, when known
        self.code = ''       # service code, when known
        self.zk_addr = ''
        self.active = False
        self.noport = False  # True when the server mapping had no port

    def __repr__(self):
        if self.name and self.code:
            return '[%s:%s-(%s,%s)]' % (self.ip, self.port, self.name, self.code)
        elif self.name:
            return '[%s:%s-(%s)]' % (self.ip, self.port, self.name)
        elif self.code:
            return '[%s:%s-(%s)]' % (self.ip, self.port, self.code)
        return '[%s:%s]' % (self.ip, self.port)

    def do_arcus_command(self, command, timeout=0.2):
        """Runs one admin command against this node over telnet.

        Args:
          command: the arcus/memcached admin command, without trailing CRLF.
          timeout: seconds to wait for the terminating marker line.

        Returns:
          The decoded response text, read up to 'OK' for scrub/flush commands
          or 'END' for everything else.
        """
        tn = telnetlib.Telnet(self.ip, self.port)
        try:
            tn.write(bytes(command + '\r\n', 'utf-8'))
            # scrub/flush replies terminate with OK; read commands with END.
            if command.startswith(('scrub', 'flush')):
                message = 'OK'
            else:
                message = 'END'
            result = tn.read_until(bytes(message, 'utf-8'), timeout)
            result = result.decode('utf-8')
            tn.write(bytes('quit\r\n', 'utf-8'))
        finally:
            # BUG FIX: previously the connection was only closed on the happy
            # path; a read/write failure leaked the socket.
            tn.close()
        return result
class zookeeper:
    """Wrapper around a KazooClient for browsing Arcus metadata in ZooKeeper."""

    def __init__(self, address):
        """Connects to the ZooKeeper ensemble at `address` immediately."""
        self.address = address
        self.zk = KazooClient(address)
        # NOTE(review): start() blocks until the ZooKeeper session is
        # established — confirm callers expect a synchronous connect here.
        self.zk.start()
        self.arcus_cache_map = {}
        self.arcus_node_map = {}
        # When True, create/delete/update swallow NodeExists/NoNode errors.
        self.force = False
        self.meta = ('', None)
        self.meta_mtime = None
def __repr__(self):
    """Summarizes the connection plus every known cache cloud."""
    # FIX: local was named `repr`, shadowing the builtin; the loop variable
    # `code` was unused, so iterate values() directly.
    text = '[ZooKeeper: %s] %s, %s' % (self.address, self.meta[0],
                                       str(self.meta[1]))
    for cache in self.arcus_cache_map.values():
        text = '%s\n\n%s' % (text, cache)
    return text
def set_force(self):
    """Makes zk_create/zk_delete/zk_update ignore exists/missing errors."""
    self.force = True
def zk_read(self, path):
    """Returns (data, stat, children) for the znode at `path`."""
    data, stat = self.zk.get(path)
    children = self.zk.get_children(path)
    return (data, stat, children)
def zk_children(self, path, watch=None):
    """Returns the children of `path`, registering `watch` when given."""
    # FIX (idiom): compare against None with `is not`, not `!=`.
    if watch is not None:
        return self.zk.get_children(path, watch=watch)
    return self.zk.get_children(path)
def zk_children_if_exists(self, path, watch=None):
    """Like zk_children, but returns [] when the znode does not exist."""
    # FIX (idiom): `== False` comparison replaced with `not`.
    if not self.zk_exists(path):
        return []
    return self.zk_children(path, watch)
def zk_exists(self, path):
    """Returns True iff the znode at `path` exists."""
    # FIX (idiom): collapse `if x == None: return False; return True` into a
    # single expression with an identity check.
    return self.zk.exists(path) is not None
def zk_create(self, path, value):
    """Creates znode `path` with utf-8 encoded `value`.

    Raises:
      NodeExistsError: if the znode already exists and force mode is off.
    """
    try:
        self.zk.create(path, bytes(value, 'utf-8'))
    except NodeExistsError:
        if not self.force:
            # BUG FIX: `raise NodeExistsError` raised the bare class, losing
            # the original exception's message and traceback; re-raise it.
            raise
def zk_delete(self, path):
    """Deletes the znode at `path`.

    Raises:
      NoNodeError: if the znode does not exist and force mode is off.
    """
    try:
        self.zk.delete(path)
    except NoNodeError:
        if not self.force:
            # BUG FIX: re-raise the caught exception instead of the bare
            # class, preserving message and traceback.
            raise
def zk_update(self, path, value):
    """Sets the znode at `path` to utf-8 encoded `value`.

    Raises:
      NoNodeError: if the znode does not exist and force mode is off.
    """
    try:
        self.zk.set(path, bytes(value, 'utf-8'))
    except NoNodeError:
        if not self.force:
            # BUG FIX: re-raise the caught exception instead of the bare
            # class, preserving message and traceback.
            raise
def get_arcus_cache_list(self):
    """Returns service codes from both plain and repl cache_list roots."""
    codes = self.zk_children_if_exists('/arcus/cache_list/')
    codes = codes + self.zk_children_if_exists('/arcus_repl/cache_list/')
    return codes
def get_arcus_node_of_code(self, code, server):
# repl case
children = self.zk_children_if_exists('/arcus_repl/cache_list/' + code)
children += self.zk_children_if_exists('/arcus/cache_list/' + code)
ret = []
for child in children:
tmp = child.split('^', 2) # remove repl info
if len(tmp) == 3:
child = tmp[2]
addr, name = child.split('-', 1)
ip, port = addr.split(':', 1)
if server != '' and (server != ip and server != name):
continue # skip this
node = arcus_node(ip, port)
node.name = name
ret.append(node)
return ret
def get_arcus_node_of_server(self, addr):
ip = socket.gethostbyname(addr)
children = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/')
children += self.zk_children_if_exists('/arcus/cache_server_mapping/')
ret = []
for child in children:
l = len(ip)
if child[:l] == ip:
code = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/' + child)
if len(code) == 0:
code = self.zk_children_if_exists('/arcus/cache_server_mapping/' + child)
code = code[0]
tmp = code.split('^') # remove repl info
code = tmp[0]
try:
ip, port = child.split(':')
except ValueError:
print('No port defined in cache_server_mapping: %s' % child)
continue
node = arcus_node(ip, port)
node.code = code
ret.append(node)
return ret
def _get_arcus_node(self, child, results):
code = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/' + child)
if len(code) == 0:
code = self.zk_children_if_exists('/arcus/cache_server_mapping/' + child)
if len(code) == 0:
print('no childrens in cache_server_mapping error: %s' % child)
print(code)
return
code = code[0]
tmp = code.split('^') # remove repl info
code = tmp[0]
try:
ip, port = child.split(':')
except ValueError:
print('No port defined in cache_server_mapping: %s' % child)
ip = child
port = '0'
node = arcus_node(ip, port)
node.code = code
results.append(node)
def get_arcus_node_all(self):
children = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/')
children += self.zk_children_if_exists('/arcus/cache_server_mapping/')
ret = []
threads = []
#print(children)
for child in children:
th = threading.Thread(target = self._get_arcus_node, args = (child, ret))
th.start()
threads.append(th)
for th in threads:
th.join()
return ret
def _get_arcus_meta(self, child, results):
data, stat, children = self.zk_read('/arcus/meta/' + child)
results[child] = [data.decode('utf-8'), stat]
def get_arcus_meta_all(self):
if self.zk.exists('/arcus/meta') == None:
self.zk.create('/arcus/meta', b'arcus meta info')
children = self.zk.get_children('/arcus/meta')
print('# children')
print(children)
threads = []
ret = {}
#print(children)
for child in children:
th = threading.Thread(target = self._get_arcus_meta, args = (child, ret))
th.start()
threads.append(th)
for th in threads:
th.join()
return ret
def _match_code_and_nodes(self, code, cache, meta):
#repl case
children = self.zk_children_if_exists('/arcus_repl/cache_list/' + code)
children += self.zk_children_if_exists('/arcus/cache_list/' + code)
for child in children:
tmp = child.split('^', 2) # remove repl info
if len(tmp) == 3:
child = tmp[2]
addr, name = child.split('-')
try:
node = self.arcus_node_map[addr]
except KeyError:
print('%s of %s is not defined in cache_server_mapping' % (addr, code))
ip, port = addr.split(':')
node = arcus_node(ip, port)
node.noport = True
node.active = True
cache.active_node.append(node)
for node in cache.node:
if node.active == False:
cache.dead_node.append(node)
if code in meta:
cache.meta = meta[code]
def load_all(self):
codes = self.get_arcus_cache_list()
for code in codes:
cache = arcus_cache(self.address, code)
self.arcus_cache_map[code] = cache
print('# get_arcus_node_all()')
nodes = self.get_arcus_node_all()
print('# done')
for node in nodes:
self.arcus_node_map[node.ip + ":" + node.port] = node
self.arcus_cache_map[node.code].node.append(node)
# meta info
print('# get_arcus_meta_all()')
meta = self.get_arcus_meta_all()
print('# done')
print('# match code & nodes')
threads = []
for code, cache in self.arcus_cache_map.items():
th = threading.Thread(target = self._match_code_and_nodes, args = (code, cache, meta))
th.start()
threads.append(th)
for th in threads:
th.join()
print('#done')
if 'zookeeper' in meta:
self.meta = meta['zookeeper']
def _callback(self, event):
child_list = self.zk.get_children(event.path)
cloud = os.path.basename(event.path)
cache = self.arcus_cache_map[cloud]
event_list = { 'created':[], 'deleted':[] }
current = {}
print('##### active node')
print(cache.active_node)
children = []
for child in child_list:
addr = child.split('-')[0]
children.append(addr)
print('#### children')
print(children)
for node in cache.active_node:
current[node.ip + ':' + node.port] = True
print('##### current')
print(current)
for node in cache.active_node:
addr = node.ip + ':' + node.port
if addr not in children:
event_list['deleted'].append(addr)
cache.active_node.remove(node)
for child in children:
if child not in current:
event_list['created'].append(child)
ip, port = child.split(':')
node = arcus_node(ip, port)
cache.active_node.append(node)
print('####### result')
print(cache.active_node)
self.callback(event, event_list)
children = self.zk.get_children(event.path, watch = self._callback)
def watch(self, callback):
self.callback = callback
for code, cache in self.arcus_cache_map.items():
children = self.zk_children_if_exists('/arcus/cache_list/' + code, watch=self._callback)
children += self.zk_children_if_exists('/arcus_repl/cache_list/' + code, watch=self._callback)
|
messenger.py | import importlib
import traceback
import sys
import zuullogger
import storage
import threading
class Messenger(object):
    ''' abstract class to load the real used messenger as plugin

    TODO: There is not much wrapping in the moment... In case we'll going to add another messenger, then a lot of wrapping need to be done here
    '''

    def __init__(self, messenger_name, messenger_token, access_manager):
        ''' creates a messenger object by load the wanted plugin

        Args:
        messenger_name (:obj:`str`): name of the messenger to load
        messenger_token (:obj:`str`): API token of the messenger needed to login as bot to the messenger system
        access_manager (:obj:`obj`): the access manager object
        '''
        try:
            self.messenger_name = messenger_name
            self.messenger_token = messenger_token
            self.access_manager = access_manager
            myModule = importlib.import_module("m_" + messenger_name.lower())
            self.my_messenger_class = getattr(myModule, "ZuulMessengerPlugin")
            # Create a Thread with a function without any arguments.
            # BUG FIX: daemon=True replaces the deprecated setDaemon(True)
            # and marks the thread daemonic before it starts.
            self.th = threading.Thread(target=self.run_thread, daemon=True)
            # Start the thread
            self.th.start()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. On failure we record the broken
            # state (plugin = None) and keep going.
            print("Can't load plugin "+messenger_name)
            self.plugin = None
            traceback.print_exc(file=sys.stdout)

    def run_thread(self):
        '''starts the messenger (runs in the daemon thread created above)'''
        self.messenger = self.my_messenger_class(
            self.messenger_token, self.access_manager)

    def shutdown(self):
        '''ends the messenger'''
        self.messenger.shutdown()
if __name__ == '__main__':
    # https://inventwithpython.com/blog/2014/12/20/translate-your-python-3-program-with-the-gettext-module/
    '''
    import gettext
    localedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locale')
    translate = gettext.translation('guess', localedir, fallback=True)
    _ = translate.gettext
    '''
    def _(s): return s

    logger = zuullogger.getLogger(__name__)
    messenger_token = storage.read_config_value("messenger_token")
    messenger_type = storage.read_config_value("messenger_type")
    print(messenger_token, messenger_type)
    if messenger_token and messenger_type:
        # BUG FIX: Messenger.__init__ requires an access_manager argument;
        # the original call omitted it and raised TypeError. No access
        # manager exists in this standalone smoke test, so pass None.
        messenger = Messenger(messenger_type, messenger_token, None)
    else:
        logger.error(
            _("Config incomplete: No messenger_token or messenger_type"))
|
test_nanny.py | import asyncio
import gc
import logging
import multiprocessing as mp
import os
import random
from contextlib import suppress
from time import sleep
from unittest import mock
import psutil
import pytest
pytestmark = pytest.mark.gpu
from tlz import first, valmap
from tornado.ioloop import IOLoop
import dask
from dask.utils import tmpfile
from distributed import Nanny, Scheduler, Worker, rpc, wait, worker
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import CommClosedError, Status
from distributed.diagnostics import SchedulerPlugin
from distributed.metrics import time
from distributed.protocol.pickle import dumps
from distributed.utils import TimeoutError, parse_ports
from distributed.utils_test import captured_logger, gen_cluster, gen_test, inc
pytestmark = pytest.mark.ci1
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=120)
async def test_nanny(s):
    # Full Nanny lifecycle via its RPC interface: kill twice (idempotent),
    # re-instantiate, then terminate for good.
    async with Nanny(s.address, nthreads=2, loop=s.loop) as n:
        async with rpc(n.address) as nn:
            assert n.is_alive()
            [ws] = s.workers.values()
            assert ws.nthreads == 2
            assert ws.nanny == n.address

            await nn.kill()
            assert not n.is_alive()
            start = time()
            # the scheduler should drop the worker within ~1s
            while n.worker_address in s.workers:
                assert time() < start + 1
                await asyncio.sleep(0.01)

            await nn.kill()
            assert not n.is_alive()
            assert n.worker_address not in s.workers

            await nn.instantiate()
            assert n.is_alive()
            [ws] = s.workers.values()
            assert ws.nthreads == 2
            assert ws.nanny == n.address

            await nn.terminate()
            assert not n.is_alive()


@gen_cluster(nthreads=[])
async def test_many_kills(s):
    # Concurrent/repeated kill() calls must not race or raise.
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    assert n.is_alive()
    await asyncio.gather(*(n.kill() for _ in range(5)))
    await asyncio.gather(*(n.kill() for _ in range(5)))
    await n.close()


@gen_cluster(Worker=Nanny)
async def test_str(s, a, b):
    # str()/repr() of a Nanny expose its worker address and thread count.
    assert a.worker_address in str(a)
    assert a.worker_address in repr(a)
    assert str(a.nthreads) in str(a)
    assert str(a.nthreads) in repr(a)


@gen_cluster(nthreads=[], client=True)
async def test_nanny_process_failure(c, s):
    # When the worker process dies (os._exit), the nanny restarts it in a
    # fresh worker_dir and cleans both directories up on close.
    n = await Nanny(s.address, nthreads=2)
    first_dir = n.worker_dir
    assert os.path.exists(first_dir)

    ww = rpc(n.worker_address)
    await ww.update_data(data=valmap(dumps, {"x": 1, "y": 2}))
    pid = n.pid
    assert pid is not None
    with suppress(CommClosedError):
        await c.run(os._exit, 0, workers=[n.worker_address])

    while n.pid == pid:  # wait while process dies and comes back
        await asyncio.sleep(0.01)

    await asyncio.sleep(1)
    while not n.is_alive():  # wait while process comes back
        await asyncio.sleep(0.01)

    # assert n.worker_address != original_address  # most likely

    while n.worker_address not in s.nthreads or n.worker_dir is None:
        await asyncio.sleep(0.01)

    second_dir = n.worker_dir

    await n.close()
    assert not os.path.exists(second_dir)
    assert not os.path.exists(first_dir)
    assert first_dir != n.worker_dir
    await ww.close_rpc()
    s.stop()


@gen_cluster(nthreads=[])
async def test_run(s):
    # Nanny.run executes a pickled function inside the nanny process.
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    with rpc(n.address) as nn:
        response = await nn.run(function=dumps(lambda: 1))
        assert response["status"] == "OK"
        assert response["result"] == 1
    await n.close()
@pytest.mark.slow
@gen_cluster(config={"distributed.comm.timeouts.connect": "1s"}, timeout=120)
async def test_no_hang_when_scheduler_closes(s, a, b):
    # https://github.com/dask/distributed/issues/2880
    with captured_logger("tornado.application", logging.ERROR) as logger:
        await s.close()
        await asyncio.sleep(1.2)
        assert a.status == Status.closed
        assert b.status == Status.closed

    out = logger.getvalue()
    assert "Timed out trying to connect" not in out


@pytest.mark.slow
@gen_cluster(
    Worker=Nanny, nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False}
)
async def test_close_on_disconnect(s, w):
    # With reconnect=False a nanny shuts itself down once the scheduler is gone.
    await s.close()

    start = time()
    while w.status != Status.closed:
        await asyncio.sleep(0.05)
        assert time() < start + 9


class Something(Worker):
    # a subclass of Worker which is not Worker
    pass


@gen_cluster(client=True, Worker=Nanny)
async def test_nanny_worker_class(c, s, w1, w2):
    # The default worker class spawned by a nanny is plain Worker.
    out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
    assert "Worker" in list(out.values())[0]
    assert w1.Worker is Worker


@gen_cluster(client=True, Worker=Nanny, worker_kwargs={"worker_class": Something})
async def test_nanny_alt_worker_class(c, s, w1, w2):
    # worker_class kwarg lets the nanny spawn a Worker subclass instead.
    out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
    assert "Something" in list(out.values())[0]
    assert w1.Worker is Something


@pytest.mark.slow
@gen_cluster(nthreads=[])
async def test_nanny_death_timeout(s):
    # A nanny that cannot reach its scheduler gives up after death_timeout.
    await s.close()
    w = Nanny(s.address, death_timeout=1)
    with pytest.raises(TimeoutError):
        await w

    assert w.status == Status.closed


@gen_cluster(client=True, Worker=Nanny)
async def test_random_seed(c, s, a, b):
    # Worker processes must not inherit identical RNG state from the nanny.
    async def check_func(func):
        x = c.submit(func, 0, 2 ** 31, pure=False, workers=a.worker_address)
        y = c.submit(func, 0, 2 ** 31, pure=False, workers=b.worker_address)
        assert x.key != y.key
        x = await x
        y = await y
        assert x != y

    await check_func(lambda a, b: random.randint(a, b))
    np = pytest.importorskip("numpy")
    await check_func(lambda a, b: np.random.randint(a, b))
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_cluster(nthreads=[])
async def test_num_fds(s):
    # Repeated nanny start/stop cycles must not leak file descriptors.
    proc = psutil.Process()

    # Warm up
    w = await Nanny(s.address)
    await w.close()
    del w
    gc.collect()

    before = proc.num_fds()

    for i in range(3):
        w = await Nanny(s.address)
        await asyncio.sleep(0.1)
        await w.close()

    while proc.num_fds() > before:
        print("fds:", before, proc.num_fds())
        await asyncio.sleep(0.1)


@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(client=True, nthreads=[])
async def test_worker_uses_same_host_as_nanny(c, s):
    # The spawned worker must listen on the same host/interface as its nanny.
    for host in ["tcp://0.0.0.0", "tcp://127.0.0.2"]:
        n = await Nanny(s.address, host=host)

        def func(dask_worker):
            return dask_worker.listener.listen_address

        result = await c.run(func)
        assert host in first(result.values())
        await n.close()


@gen_test()
async def test_scheduler_file():
    # A nanny can discover its scheduler through a shared scheduler file.
    with tmpfile() as fn:
        s = await Scheduler(scheduler_file=fn, dashboard_address=":0")
        w = await Nanny(scheduler_file=fn)
        assert set(s.workers) == {w.worker_address}
        await w.close()
        s.stop()


@gen_cluster(client=True, Worker=Nanny, nthreads=[("127.0.0.1", 2)])
async def test_nanny_timeout(c, s, a):
    # A restart that cannot finish within `timeout` is logged as an error
    # and the worker's scattered data is lost (future becomes cancelled).
    x = await c.scatter(123)
    with captured_logger(
        logging.getLogger("distributed.nanny"), level=logging.ERROR
    ) as logger:
        response = await a.restart(timeout=0.1)

    out = logger.getvalue()
    assert "timed out" in out.lower()

    start = time()
    while x.status != "cancelled":
        await asyncio.sleep(0.1)
        assert time() < start + 7


@gen_cluster(
    nthreads=[("127.0.0.1", 1)],
    client=True,
    Worker=Nanny,
    worker_kwargs={"memory_limit": "400 MiB"},
)
async def test_nanny_terminate(c, s, a):
    # A worker exceeding its memory limit is terminated and restarted by
    # the nanny (observable as a pid change plus log messages).
    def leak():
        L = []
        while True:
            L.append(b"0" * 5_000_000)
            sleep(0.01)

    before = a.process.pid
    with captured_logger(logging.getLogger("distributed.nanny")) as logger:
        future = c.submit(leak)
        while a.process.pid == before:
            await asyncio.sleep(0.01)
        out = logger.getvalue()
        assert "restart" in out.lower()
        assert "memory" in out.lower()


@gen_cluster(
    nthreads=[("127.0.0.1", 1)] * 8,
    client=True,
    Worker=Worker,
    clean_kwargs={"threads": False},
)
async def test_throttle_outgoing_connections(c, s, a, *workers):
    # But a bunch of small data on worker a
    await c.run(lambda: logging.getLogger("distributed.worker").setLevel(logging.DEBUG))
    remote_data = c.map(
        lambda x: b"0" * 10000, range(10), pure=False, workers=[a.address]
    )
    await wait(remote_data)

    def pause(dask_worker):
        # Patch paused and memory_monitor on the one worker
        # This is very fragile, since a refactor of memory_monitor to
        # remove _memory_monitoring will break this test.
        dask_worker._memory_monitoring = True
        dask_worker.status = Status.paused
        dask_worker.outgoing_current_count = 2

    await c.run(pause, workers=[a.address])
    requests = [
        await a.get_data(await w.rpc.connect(w.address), keys=[f.key], who=w.address)
        for w in workers
        for f in remote_data
    ]
    await wait(requests)
    wlogs = await c.get_worker_logs(workers=[a.address])
    wlogs = "\n".join(x[1] for x in wlogs[a.address])
    assert "throttling" in wlogs.lower()
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
    # memory_limit=0 disables the memory monitor on worker and nanny alike.
    nanny = await Nanny(s.address, loop=s.loop, memory_limit=0)
    typ = await c.run(lambda dask_worker: type(dask_worker.data))
    assert typ == {nanny.worker_address: dict}
    pcs = await c.run(lambda dask_worker: list(dask_worker.periodic_callbacks))
    assert "memory" not in pcs
    assert "memory" not in nanny.periodic_callbacks

    future = c.submit(inc, 1)
    assert await future == 2
    await asyncio.sleep(0.02)

    await c.submit(inc, 2)  # worker doesn't pause

    await nanny.close()


@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
    # The nanny can pick up the scheduler address from dask config.
    with dask.config.set({"scheduler-address": s.address}):
        nanny = await Nanny(loop=s.loop)
        assert nanny.scheduler.address == s.address

        start = time()
        while not s.workers:
            await asyncio.sleep(0.1)
            assert time() < start + 10

        await nanny.close()


@pytest.mark.slow
@gen_test()
async def test_wait_for_scheduler():
    # A nanny pointed at a nonexistent scheduler waits quietly (no errors,
    # no restart loop) instead of crashing.
    with captured_logger("distributed") as log:
        w = Nanny("127.0.0.1:44737")
        IOLoop.current().add_callback(w.start)
        await asyncio.sleep(6)
        await w.close()

    log = log.getvalue()
    assert "error" not in log.lower(), log
    assert "restart" not in log.lower(), log


@gen_cluster(nthreads=[], client=True)
async def test_environment_variable(c, s):
    # env= kwarg injects per-nanny environment variables into the worker.
    a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
    b = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "456"})
    await asyncio.gather(a, b)
    results = await c.run(lambda: os.environ["FOO"])
    assert results == {a.worker_address: "123", b.worker_address: "456"}
    await asyncio.gather(a.close(), b.close())


@gen_cluster(nthreads=[], client=True)
async def test_environment_variable_by_config(c, s, monkeypatch):
    # distributed.nanny.environ must be a dict; value precedence is
    # kwargs > external env var > config.
    with dask.config.set({"distributed.nanny.environ": "456"}):
        with pytest.raises(TypeError, match="configuration must be of type dict"):
            Nanny(s.address, loop=s.loop, memory_limit=0)

    with dask.config.set({"distributed.nanny.environ": {"FOO": "456"}}):

        # precedence
        # kwargs > env var > config

        with mock.patch.dict(os.environ, {"FOO": "BAR"}, clear=True):
            a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
            x = Nanny(s.address, loop=s.loop, memory_limit=0)

        b = Nanny(s.address, loop=s.loop, memory_limit=0)

        await asyncio.gather(a, b, x)
        results = await c.run(lambda: os.environ["FOO"])
        assert results == {
            a.worker_address: "123",
            b.worker_address: "456",
            x.worker_address: "BAR",
        }
        await asyncio.gather(a.close(), b.close(), x.close())


@gen_cluster(
    nthreads=[],
    client=True,
    config={"distributed.nanny.environ": {"A": 1, "B": 2, "D": 4}},
)
async def test_environment_variable_config(c, s, monkeypatch):
    # Config-, kwarg- and host-provided env vars all reach the worker;
    # kwargs override config, host env overrides config defaults.
    monkeypatch.setenv("D", "123")
    async with Nanny(s.address, env={"B": 3, "C": 4}) as n:
        results = await c.run(lambda: os.environ)
        assert results[n.worker_address]["A"] == "1"
        assert results[n.worker_address]["B"] == "3"
        assert results[n.worker_address]["C"] == "4"
        assert results[n.worker_address]["D"] == "123"


@gen_cluster(nthreads=[], client=True)
async def test_data_types(c, s):
    # data= kwarg controls the storage mapping type used by the worker.
    w = await Nanny(s.address, data=dict)
    r = await c.run(lambda dask_worker: type(dask_worker.data))
    assert r[w.worker_address] == dict
    await w.close()


@gen_cluster(nthreads=[])
async def test_local_directory(s):
    # temporary_directory config determines where the worker space lives.
    with tmpfile() as fn:
        with dask.config.set(temporary_directory=fn):
            w = await Nanny(s.address)
            assert w.local_directory.startswith(fn)
            assert "dask-worker-space" in w.local_directory
            assert w.process.worker_dir.count("dask-worker-space") == 1
            await w.close()
def _noop(x):
    """Define here because closures aren't pickleable."""
    pass


@gen_cluster(
    nthreads=[("127.0.0.1", 1)],
    client=True,
    Worker=Nanny,
    config={"distributed.worker.daemon": False},
)
async def test_mp_process_worker_no_daemon(c, s, a):
    # With worker.daemon=False, tasks may spawn their own subprocesses.
    def multiprocessing_worker():
        p = mp.Process(target=_noop, args=(None,))
        p.start()
        p.join()

    await c.submit(multiprocessing_worker)


@gen_cluster(
    nthreads=[("127.0.0.1", 1)],
    client=True,
    Worker=Nanny,
    config={"distributed.worker.daemon": False},
)
async def test_mp_pool_worker_no_daemon(c, s, a):
    # Same as above, but using a multiprocessing.Pool inside a task.
    def pool_worker(world_size):
        with mp.Pool(processes=world_size) as p:
            p.map(_noop, range(world_size))

    await c.submit(pool_worker, 4)


@gen_cluster(nthreads=[])
async def test_nanny_closes_cleanly(s):
    # On a clean close the worker process is reaped with exit code 0.
    async with Nanny(s.address) as n:
        assert n.process.pid
        proc = n.process.process
    assert not n.process
    assert not proc.is_alive()
    assert proc.exitcode == 0


@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_lifetime(s):
    # lifetime= with lifetime_restart=True makes a nanny restart its worker;
    # we wait for two scheduler-side remove_worker events to prove it.
    counter = 0
    event = asyncio.Event()

    class Plugin(SchedulerPlugin):
        def add_worker(self, **kwargs):
            pass

        def remove_worker(self, **kwargs):
            nonlocal counter
            counter += 1
            if counter == 2:  # wait twice, then trigger closing event
                event.set()

    s.add_plugin(Plugin())
    async with Nanny(s.address):
        async with Nanny(s.address, lifetime="500 ms", lifetime_restart=True):
            await event.wait()


@gen_cluster(client=True, nthreads=[])
async def test_nanny_closes_cleanly_2(c, s):
    # Terminating the worker over RPC closes its nanny too.
    async with Nanny(s.address) as n:
        with c.rpc(n.worker_address) as w:
            IOLoop.current().add_callback(w.terminate)
            start = time()
            while n.status != Status.closed:
                await asyncio.sleep(0.01)
                assert time() < start + 5

        assert n.status == Status.closed


@gen_cluster(client=True, nthreads=[])
async def test_config(c, s):
    # config= kwarg is forwarded into the worker's dask config.
    async with Nanny(s.address, config={"foo": "bar"}) as n:
        config = await c.run(dask.config.get, "foo")
        assert config[n.worker_address] == "bar"


@gen_cluster(client=True, nthreads=[])
async def test_nanny_port_range(c, s):
    # Nannies and their workers pick free ports from the given ranges and
    # fail with a clear error once the range is exhausted.
    nanny_port = "9867:9868"
    worker_port = "9869:9870"
    async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n1:
        assert n1.port == 9867  # Selects first port in range
        async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n2:
            assert n2.port == 9868  # Selects next port in range

            with pytest.raises(
                ValueError, match="Could not start Nanny"
            ):  # No more ports left
                async with Nanny(s.address, port=nanny_port, worker_port=worker_port):
                    pass

            # Ensure Worker ports are in worker_port range
            def get_worker_port(dask_worker):
                return dask_worker.port

            worker_ports = await c.run(get_worker_port)
            assert list(worker_ports.values()) == parse_ports(worker_port)
class KeyboardInterruptWorker(worker.Worker):
    """A Worker that raises KeyboardInterrupt almost immediately"""

    async def heartbeat(self):
        # Schedule the interrupt on the loop so it fires outside heartbeat().
        def raise_err():
            raise KeyboardInterrupt()

        self.loop.add_callback(raise_err)


@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
@pytest.mark.asyncio
async def test_nanny_closed_by_keyboard_interrupt(cleanup, protocol):
    # A KeyboardInterrupt in the worker stops the process and deregisters
    # it from the scheduler (auto_restart disabled so it stays down).
    if protocol == "ucx":  # Skip if UCX isn't available
        pytest.importorskip("ucp")

    async with Scheduler(protocol=protocol, dashboard_address=":0") as s:
        async with Nanny(
            s.address, nthreads=1, worker_class=KeyboardInterruptWorker
        ) as n:
            n.auto_restart = False
            await n.process.stopped.wait()
            # Check that the scheduler has been notified about the closed worker
            assert len(s.workers) == 0


class StartException(Exception):
    # Sentinel exception raised by BrokenWorker.start below.
    pass


class BrokenWorker(worker.Worker):
    # Worker whose start() always fails, for failure-propagation tests.
    async def start(self):
        raise StartException("broken")


@gen_cluster(nthreads=[])
async def test_worker_start_exception(s):
    # make sure this raises the right Exception:
    with pytest.raises(StartException):
        async with Nanny(s.address, worker_class=BrokenWorker) as n:
            pass


@gen_cluster(nthreads=[])
async def test_failure_during_worker_initialization(s):
    # A bad constructor kwarg must fail fast, not enter a restart loop.
    with captured_logger(logger="distributed.nanny", level=logging.WARNING) as logs:
        with pytest.raises(Exception):
            async with Nanny(s.address, foo="bar") as n:
                await n
    assert "Restarting worker" not in logs.getvalue()


@gen_cluster(client=True, Worker=Nanny)
async def test_environ_plugin(c, s, a, b):
    # The Environ worker plugin sets env vars on existing and new nannies.
    from dask.distributed import Environ

    await c.register_worker_plugin(Environ({"ABC": 123}))
    async with Nanny(s.address, name="new") as n:
        results = await c.run(os.getenv, "ABC")
        assert results[a.worker_address] == "123"
        assert results[b.worker_address] == "123"
        assert results[n.worker_address] == "123"
|
testserver.py | """
This is a simple web-server that does very few things. It is necessary for
the downloader tests.
Here is the logic behind the initialization:
Because several instances of the test can run simultaneously on the Build
machine, we have to take this into account and not start another server if
one is already running. However, there is a chance that a server will not
terminate correctly, and will still hold the port, so we will not be able
to initialize another server.
So before initializing the server, we check if any processes are using the port
that we want to use. If we find such a process, we assume that it might be
working, and wait for about 10 seconds for it to start serving. If it does not,
we kill it.
Next, we check the name of our process and see if there are other processes
with the same name. If there are, we assume that they might start serving any
moment. So we iterate over the ones that have PID lower than ours, and wait
for them to start serving. If a process doesn't serve, we kill it.
If we have killed (or someone has) all the processes with PIDs lower than ours,
we try to start serving. If we succeed, we kill all other processes with the
same name as ours. If we don't, someone else will kill us.
"""
from __future__ import print_function
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from ResponseProvider import Payload
from ResponseProvider import ResponseProvider
from ResponseProvider import ResponseProviderMixin
from SiblingKiller import SiblingKiller
from threading import Timer
import os
import socket
import threading
import traceback
import logging
import logging.config
try:
from tornado_handler import MainHandler
USE_TORNADO = True
except:
USE_TORNADO = False
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
PORT = 34568
LIFESPAN = 180.0 # timeout for the self destruction timer - how much time
# passes between the last request and the server killing
# itself
PING_TIMEOUT = 5 # Number of seconds to wait for ping response
class InternalServer(HTTPServer):
    """HTTPServer that shuts itself down after LIFESPAN seconds without
    requests, or shortly after its last client disconnects."""

    def kill_me(self):
        self.shutdown()
        logging.info("The server's life has come to an end, pid: {}".format(os.getpid()))

    def reset_selfdestruct_timer(self):
        # Re-arm the self-destruct countdown; called on every request.
        if self.self_destruct_timer:
            self.self_destruct_timer.cancel()
        self.self_destruct_timer = Timer(LIFESPAN, self.kill_me)
        self.self_destruct_timer.start()

    def __init__(self, server_address, RequestHandlerClass,
                 bind_and_activate=True):
        HTTPServer.__init__(self, server_address, RequestHandlerClass,
                            bind_and_activate=bind_and_activate)
        self.self_destruct_timer = None
        self.clients = 1  # the creating process counts as the first client
        self.reset_selfdestruct_timer()

    def suicide(self):
        # Called when a client disconnects; shut down (from a short timer,
        # so the current request can finish) once nobody is left.
        self.clients -= 1
        if self.clients == 0:
            if self.self_destruct_timer is not None:
                self.self_destruct_timer.cancel()
            quick_and_painless_timer = Timer(0.1, self.kill_me)
            quick_and_painless_timer.start()
class TestServer:
    """The test web server; see the module docstring for the sibling-killing
    startup protocol that guarantees only one instance serves on PORT."""

    def __init__(self):
        self.may_serve = False

        pid = os.getpid()
        logging.info("Init server. Pid: {}".format(pid))

        self.server = None

        # Make sure we are the only process allowed to serve on PORT.
        killer = SiblingKiller(PORT, PING_TIMEOUT)
        killer.kill_siblings()

        if killer.allow_serving():
            try:
                self.init_server()
                logging.info("Started server with pid: {}".format(pid))
                self.may_serve = True

            except socket.error:
                logging.info("Failed to start the server: Port is in use")
            except Exception as e:
                logging.debug(e)
                logging.info("Failed to start serving for unknown reason")
                traceback.print_exc()
        else:
            logging.info("Not allowed to start serving for process: {}".format(pid))

    def init_server(self):
        # Prefer Tornado; fall back to the stdlib BaseHTTPServer.
        if USE_TORNADO:
            MainHandler.init_server(PORT, LIFESPAN)
        else:
            print("""
*************
WARNING: Using the python's built-in BaseHTTPServer!
It is all right if you run the tests on your local machine, but if you are running tests on a server,
please consider installing Tornado. It is a much more powerful web-server. Otherwise you will find
that some of your downloader tests either fail or hang.

do

sudo pip install tornado

or go to http://www.tornadoweb.org/en/stable/ for more detail.

*************
""")

            self.server = InternalServer(('localhost', PORT), PostHandler)

    def start_serving(self):
        if not self.may_serve:
            return

        if USE_TORNADO:
            MainHandler.start_serving()
        else:
            thread = threading.Thread(target=self.server.serve_forever)
            # BUG FIX: was `thread.deamon = True` (typo) — that only set an
            # unused attribute, leaving the serve thread non-daemonic so it
            # could keep the interpreter alive after the main thread exits.
            thread.daemon = True
            thread.start()
class PostHandler(BaseHTTPRequestHandler, ResponseProviderMixin):
    """Fallback request handler (used when Tornado is unavailable) that
    delegates response construction to ResponseProvider.

    NOTE: relies on the Python 2 BaseHTTPServer API
    (self.headers.getheader) — not runnable on Python 3 as-is.
    """

    def dispatch_response(self, payload):
        # Serialize a Payload object into an HTTP response.
        self.send_response(payload.response_code())
        for h in payload.headers():
            self.send_header(h, payload.headers()[h])
        self.send_header("Content-Length", payload.length())
        self.end_headers()
        self.wfile.write(payload.message())

    def init_vars(self):
        self.response_provider = ResponseProvider(self)

    def do_POST(self):
        # Echo the POST body back as a Payload; each request also re-arms
        # the server's self-destruct timer.
        self.init_vars()
        self.server.reset_selfdestruct_timer()
        length = int(self.headers.getheader('content-length'))
        self.dispatch_response(Payload(self.rfile.read(length)))

    def do_GET(self):
        headers = self.prepare_headers()
        self.init_vars()
        self.dispatch_response(self.response_provider.response_for_url_and_headers(self.path, headers))

    def prepare_headers(self):
        # Copy request headers into a plain dict for the ResponseProvider.
        ret = dict()
        for h in self.headers:
            ret[h] = self.headers.get(h)
        return ret

    def got_pinged(self):
        # ResponseProviderMixin hook: a sibling process checked we are alive.
        self.server.clients += 1

    def kill(self):
        # ResponseProviderMixin hook: a client asked this server to shut down.
        logging.debug("Kill called in testserver")
        self.server.suicide()
if __name__ == '__main__':
    # Entry point: start_serving is a no-op unless this process won the
    # sibling-killer election performed in TestServer.__init__.
    server = TestServer()
    server.start_serving()
|
bootstrap.py | """
Bootstrap an installation of TLJH.
Sets up just enough TLJH environments to invoke tljh.installer.
This script is run as:
curl <script-url> | sudo python3 -
Constraints:
- Entire script should be compatible with Python 3.6 (We run on Ubuntu 18.04+)
- Script should parse in Python 3.4 (since we exit with useful error message on Ubuntu 14.04+)
- Use stdlib modules only
"""
import os
from http.server import SimpleHTTPRequestHandler, HTTPServer
import multiprocessing
import subprocess
import sys
import logging
import shutil
import urllib.request
html = """
<html>
<head>
<title>The Littlest Jupyterhub</title>
</head>
<body>
<meta http-equiv="refresh" content="30" >
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<img class="logo" src="https://raw.githubusercontent.com/montaguegabe/the-littlest-jupyterhub/master/docs/images/logo/logo.png">
<div class="loader center"></div>
<div class="center main-msg">Please wait while your TLJH is building...</div>
<div class="center logs-msg">Click the button below to see the logs</div>
<div class="center tip" >Tip: to update the logs, refresh the page</div>
<button class="logs-button center" onclick="window.location.href='/logs'">View logs</button>
</body>
<style>
button:hover {
background: grey;
}
.logo {
width: 150px;
height: auto;
}
.center {
margin: 0 auto;
margin-top: 50px;
text-align:center;
display: block;
}
.main-msg {
font-size: 30px;
font-weight: bold;
color: grey;
text-align:center;
}
.logs-msg {
font-size: 15px;
color: grey;
}
.tip {
font-size: 13px;
color: grey;
margin-top: 10px;
font-style: italic;
}
.logs-button {
margin-top:15px;
border: 0;
color: white;
padding: 15px 32px;
font-size: 16px;
cursor: pointer;
background: #f5a252;
}
.loader {
width: 150px;
height: 150px;
border-radius: 90%;
border: 7px solid transparent;
animation: spin 2s infinite ease;
animation-direction: alternate;
}
@keyframes spin {
0% {
transform: rotateZ(0deg);
border-top-color: #f17c0e
}
100% {
transform: rotateZ(360deg);
border-top-color: #fce5cf;
}
}
</style>
</head>
</html>
"""
logger = logging.getLogger(__name__)
def get_os_release_variable(key):
    """
    Return value for key from /etc/os-release

    /etc/os-release is a bash file, so should use bash to parse it.

    Returns empty string if key is not found.
    """
    # Let bash expand the variable after sourcing the file; an unset key
    # simply echoes an empty line.
    script = "source /etc/os-release && echo ${{{key}}}".format(key=key)
    raw = subprocess.check_output(['/bin/bash', '-c', script])
    return raw.decode().strip()
# Copied into tljh/utils.py. Make sure the copies are exactly the same!
def run_subprocess(cmd, *args, **kwargs):
    """
    Run given cmd with smart output behavior.

    If command succeeds, print output to debug logging.
    If it fails, print output to info logging.

    In TLJH, this sends successful output to the installer log,
    and failed output directly to the user's screen
    """
    logger = logging.getLogger('tljh')
    proc = subprocess.run(
        cmd, *args,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        **kwargs
    )
    printable_command = ' '.join(cmd)
    if proc.returncode == 0:
        # This goes into installer.log
        logger.debug('Ran {command} with exit code {code}'.format(
            command=printable_command, code=proc.returncode
        ))
        # This produces multi line log output, unfortunately. Not sure how to fix.
        # For now, prioritizing human readability over machine readability.
        logger.debug(proc.stdout.decode())
    else:
        # Our process failed! Show output to the user
        logger.error('Ran {command} with exit code {code}'.format(
            command=printable_command, code=proc.returncode
        ))
        logger.error(proc.stdout.decode())
        raise subprocess.CalledProcessError(cmd=cmd, returncode=proc.returncode)
def validate_host():
    """
    Make sure TLJH is installable in current host

    Exits the process (sys.exit(1)) when any requirement fails:
    Ubuntu >= 18.04, Python >= 3.5, and systemd available.
    """
    # Support only Ubuntu 18.04+
    distro = get_os_release_variable('ID')
    # NOTE(review): float() on VERSION_ID is fragile — a value like
    # "18.04.1" would raise ValueError. TODO confirm acceptable inputs.
    version = float(get_os_release_variable('VERSION_ID'))
    if distro != 'ubuntu':
        print('The Littlest JupyterHub currently supports Ubuntu Linux only')
        sys.exit(1)
    elif float(version) < 18.04:
        # (version is already a float here; the second float() is redundant)
        print('The Littlest JupyterHub requires Ubuntu 18.04 or higher')
        sys.exit(1)
    if sys.version_info < (3, 5):
        print("bootstrap.py must be run with at least Python 3.5")
        sys.exit(1)
    # NOTE(review): 'systemd' itself is usually not on PATH (only
    # 'systemctl' is) — confirm this check behaves as intended.
    if not (shutil.which('systemd') and shutil.which('systemctl')):
        print("Systemd is required to run TLJH")
        # Only fail running inside docker if systemd isn't present
        if os.path.exists('/.dockerenv'):
            print("Running inside a docker container without systemd isn't supported")
        print("We recommend against running a production TLJH instance inside a docker container")
        print("For local development, see http://tljh.jupyter.org/en/latest/contributing/dev-setup.html")
        sys.exit(1)
class LoaderPageRequestHandler(SimpleHTTPRequestHandler):
    """Serves the temporary "TLJH is building" progress page.

    Routes:
      /            -> 302 redirect to /index.html
      /index.html  -> static copy written to /var/run by main()
      /favicon.ico -> static copy fetched to /var/run by main()
      /logs        -> current contents of the installer log, as plain text
      anything else -> 403
    """

    def do_GET(self):
        requested = self.path
        if requested == "/logs":
            # Stream the installer log back as UTF-8 plain text.
            with open("/opt/tljh/installer.log", "r") as log_file:
                logs = log_file.read()
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write(logs.encode('utf-8'))
        elif requested == "/index.html":
            # Rewrite to the copy dropped into /var/run and delegate.
            self.path = "/var/run/index.html"
            return SimpleHTTPRequestHandler.do_GET(self)
        elif requested == "/favicon.ico":
            self.path = "/var/run/favicon.ico"
            return SimpleHTTPRequestHandler.do_GET(self)
        elif requested == "/":
            self.send_response(302)
            self.send_header('Location','/index.html')
            self.end_headers()
        else:
            # Anything else is forbidden.
            SimpleHTTPRequestHandler.send_error(self, code=403)
def serve_forever(server):
    """Run *server*'s request loop, exiting quietly on Ctrl-C."""
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Swallow the interrupt so shutdown produces no traceback.
        pass
def main():
    """Bootstrap entry point: optionally serve a progress page, validate
    the host, configure logging, build the hub virtualenv, install the
    tljh package and finally exec the real installer (tljh.installer)."""
    flags = sys.argv[1:]
    temp_page_flag = "--show-progress-page"
    # Check for flag in the argv list. This doesn't use argparse
    # because it's the only argument that's meant for the bootstrap script.
    # All the other flags will be passed to and parsed by the installer.
    if temp_page_flag in flags:
        with open("/var/run/index.html", "w+") as f:
            f.write(html)
        favicon_url="https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/share/jupyterhub/static/favicon.ico"
        urllib.request.urlretrieve(favicon_url, "/var/run/favicon.ico")
        # If the bootstrap is run to upgrade TLJH, then this will raise an "Address already in use" error
        try:
            # Serve the progress page on port 80 from a child process.
            loading_page_server = HTTPServer(("", 80), LoaderPageRequestHandler)
            p = multiprocessing.Process(target=serve_forever, args=(loading_page_server,))
            # Serves the loading page until TLJH builds
            p.start()
            # Remove the flag from the args list, since it was only relevant to this script.
            flags.remove("--show-progress-page")
            # Pass the server's pid as a flag to the installer
            pid_flag = "--progress-page-server-pid"
            flags.extend([pid_flag, str(p.pid)])
        except OSError:
            # Only serve the loading page when installing TLJH
            pass
    validate_host()
    install_prefix = os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')
    hub_prefix = os.path.join(install_prefix, 'hub')
    # Set up logging to print to a file and to stderr
    os.makedirs(install_prefix, exist_ok=True)
    file_logger_path = os.path.join(install_prefix, 'installer.log')
    file_logger = logging.FileHandler(file_logger_path)
    # installer.log should be readable only by root
    # NOTE(review): 0o500 is owner read+execute; 0o600 (rw-) would be the
    # conventional mode for a log file — confirm intent.
    os.chmod(file_logger_path, 0o500)
    file_logger.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    file_logger.setLevel(logging.DEBUG)
    logger.addHandler(file_logger)
    # Users see INFO-and-above on stderr; DEBUG goes only to the file.
    stderr_logger = logging.StreamHandler()
    stderr_logger.setFormatter(logging.Formatter('%(message)s'))
    stderr_logger.setLevel(logging.INFO)
    logger.addHandler(stderr_logger)
    logger.setLevel(logging.DEBUG)
    logger.info('Checking if TLJH is already installed...')
    # The presence of the hub venv's python3 marks an existing install.
    if os.path.exists(os.path.join(hub_prefix, 'bin', 'python3')):
        logger.info('TLJH already installed, upgrading...')
        initial_setup = False
    else:
        logger.info('Setting up hub environment')
        initial_setup = True
        # Install software-properties-common, so we can get add-apt-repository
        # That helps us make sure the universe repository is enabled, since
        # that's where the python3-pip package lives. In some very minimal base
        # VM images, it looks like the universe repository is disabled by default,
        # causing bootstrapping to fail.
        run_subprocess(['apt-get', 'update', '--yes'])
        run_subprocess(['apt-get', 'install', '--yes', 'software-properties-common'])
        run_subprocess(['add-apt-repository', 'universe'])
        run_subprocess(['apt-get', 'update', '--yes'])
        run_subprocess(['apt-get', 'install', '--yes',
            'python3',
            'python3-venv',
            'python3-pip',
            'git'
        ])
        logger.info('Installed python & virtual environment')
        os.makedirs(hub_prefix, exist_ok=True)
        run_subprocess(['python3', '-m', 'venv', hub_prefix])
        logger.info('Set up hub virtual environment')
    if initial_setup:
        logger.info('Setting up TLJH installer...')
    else:
        logger.info('Upgrading TLJH installer...')
    pip_flags = ['--upgrade']
    if os.environ.get('TLJH_BOOTSTRAP_DEV', 'no') == 'yes':
        pip_flags.append('--editable')
    # TLJH_BOOTSTRAP_PIP_SPEC lets developers point at a fork or branch.
    tljh_repo_path = os.environ.get(
        'TLJH_BOOTSTRAP_PIP_SPEC',
        'git+https://github.com/montaguegabe/the-littlest-jupyterhub.git'
    )
    # Upgrade pip
    run_subprocess([
        os.path.join(hub_prefix, 'bin', 'pip'),
        'install',
        '--upgrade',
        'pip==20.0.*'
    ])
    logger.info('Upgraded pip')
    run_subprocess([
        os.path.join(hub_prefix, 'bin', 'pip'),
        'install'
    ] + pip_flags + [tljh_repo_path])
    logger.info('Setup tljh package')
    logger.info('Starting TLJH installer...')
    # Replace this process with the installer running in the hub venv.
    os.execv(
        os.path.join(hub_prefix, 'bin', 'python3'),
        [
            os.path.join(hub_prefix, 'bin', 'python3'),
            '-m',
            'tljh.installer',
        ] + flags
    )
# Script entry point: run the bootstrap installer.
if __name__ == '__main__':
    main()
|
api.py | import os
import sys
import logging
import paramiko
import subprocess
import multiprocessing
if sys.version_info.major == 2:
from environment import Environment
else:
from yarn.environment import Environment
from getpass import getpass
from contextlib import contextmanager
from paramiko.ssh_exception import AuthenticationException
# Module logger used by run()/put()/get(); INFO and above are shown.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# I really, really wish I could change the format of this to have my
# connection_string in it, but I am unwilling to break the logger to do it.
logging.basicConfig(format='[%(asctime)s] %(levelname)s - %(funcName)s: %(message)s')
# Here is the global environment for the system. Pretty much everyone will
# use this.
env = Environment()
# Starting the work for local execution per GitHub Issue #20
def local(command):
    """Run *command* in a local shell, mirroring run()'s log behavior.

    On success returns stdout as one newline-joined string. If anything
    was written to stderr, logs a warning and either exits the process
    (env.warn_only False) or returns None.
    """
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    raw_out, raw_err = proc.communicate()
    out_lines = [line.decode('utf-8').strip() for line in raw_out.splitlines()]
    err_lines = ["ERROR: [LOCAL] '{}'".format(line.decode('utf-8').strip())
                 for line in raw_err.splitlines()]
    if not err_lines:
        if not env.quiet:
            for line in out_lines:
                logging.info("[LOCAL] - {}".format(line))
        return "\n".join(out_lines)
    if not env.quiet:
        logging.warning("\n".join(err_lines))
        logging.warning("ENV_DEBUG: '{}'".format(local("env")))
    if not env.warn_only:
        sys.exit(1)
# Starting the work for sudo per GitHub Issue #20
def sudo(command):
    """Run *command* under sudo on the remote host, supplying
    env.password at the sudo prompt over a pty."""
    if not env.password:
        env.password = getpass("Password for {}: ".format(env.connection_string))
    @ssh_connection
    def sudo_command(*args, **kwargs):
        conn = kwargs['conn']
        # A pty is required for sudo to emit its password prompt.
        stdin, stdout, stderr = conn.exec_command(kwargs['command'], get_pty=True)
        output_buffer = ""
        # Wait for the sudo password prompt before sending the password.
        # NOTE(review): if '[sudo]' never appears (passwordless sudo or a
        # localized prompt) this loop blocks indefinitely — confirm.
        while not '[sudo]' in output_buffer:
            output_buffer += stdout.channel.recv(2048).decode('utf-8')
        stdin.write('{}\n'.format(env.password))
        stdin.flush()
        # stdout/stderr are rebound from channel objects to line lists here.
        stdout = [a.decode('utf-8').rstrip() for a in stdout.read().splitlines() if a]
        stderr = ["ERROR: [{}] '{}'".format(env.connection_string, a.decode('utf-8').rstrip()) for a in stderr.read().splitlines()]
        if not stderr:
            if not env.quiet:
                for a in stdout:
                    logging.info("[{}] - {}".format(env.connection_string, a))
            return "\n".join(stdout)
        if not env.quiet:
            logging.warning("\n".join(stderr))
            # NOTE(review): this calls run("env") (remote) while local()
            # uses local("env") — possibly a copy/paste slip; confirm.
            logging.warning("ENV_DEBUG: '{}'".format(run("env")))
        if not env.warn_only:
            sys.exit(1)
    return sudo_command(command='sudo -Si {}'.format(command))
# The joys of running in parallel
def parallel(wrapped_function):
    """Decorator: run the wrapped task in its own process when
    env.run_parallel is truthy; otherwise call it synchronously.

    Spawned processes are appended to env.parallel_tasks so callers can
    join them later. Note the parallel branch returns None.

    functools.wraps preserves the wrapped function's __name__/__doc__ —
    important because the logging format uses %(funcName)s.
    """
    from functools import wraps

    @wraps(wrapped_function)
    def _wrapped(*args, **kwargs):
        if env.run_parallel:
            task = multiprocessing.Process(target=wrapped_function, args=args, kwargs=kwargs)
            env.parallel_tasks.append(task)
            task.start()
        else:
            return wrapped_function(*args, **kwargs)
    return _wrapped
# This might be somewhat important.
def ssh_connection(wrapped_function):
    """Decorator: open a paramiko SSH connection, pass it to the wrapped
    function as the 'conn' kwarg, and always close it afterwards."""
    # NOTE(review): this log line runs at decoration time (import time),
    # not when a connection is actually made — confirm that is intended.
    logging.info("Creating SSH connection to: {}".format(env.connection_string))
    def _wrapped(*args, **kwargs):
        ssh = paramiko.SSHClient()
        # Accept unknown host keys automatically.
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Lazily load the private key the first time it is needed.
        if env.key is not None and env._paramiko_key is None:
            env._paramiko_key = paramiko.RSAKey.from_private_key(open(env.key), password=env.passphrase)
        if not env.host_string:
            env.host_string = input("No hosts were specified. Host IP/DNS Name: ")
        try:
            # Here is where the connection is set up.
            ssh.connect(env.host_string, env.host_port, username=env.user,
                    pkey=env._paramiko_key)
            return wrapped_function(*args, conn=ssh, **kwargs)
        except AuthenticationException:
            # If there is a problem with the previous attempt (no/bad password)
            # Here is where we will query for it and try again.
            env.password = getpass("Password for {}: ".format(env.connection_string))
            ssh.connect(env.host_string, env.host_port, username=env.user,
                    password=env.password)
            return wrapped_function(*args, conn=ssh, **kwargs)
        finally:
            # Gotta love the cleanup associated with the finally call in Python.
            logging.info("Closing connection: {}".format(env.connection_string))
            ssh.close()
    return _wrapped
@contextmanager
def cd(path):
    """Context manager: push *path* onto env.working_directory so that
    subsequent run() calls execute inside it; pop again on exit.

    The pop is in a finally block so an exception raised inside the with
    body can no longer leave the directory stack unbalanced.
    """
    # Yes, I know it's simplistic. But if it's stupid and it works, then it
    # ain't stupid.
    env.working_directory.append(path)
    try:
        yield
    finally:
        env.working_directory.pop()
# The meat and potatoes of the entire system.
def run(command):
    """Execute *command* on the remote host over SSH.

    Honors the cd() working-directory stack and env.quiet/env.warn_only.
    Returns stdout joined with newlines on success; on any stderr output
    logs a warning and returns False (or exits if env.warn_only is False).
    """
    @ssh_connection
    def run_command(*args, **kwargs):
        command = kwargs['command']
        # Prefix with the cd() stack, e.g. "cd a && cd b && <command>".
        if env.working_directory:
            command = "cd {} && {}".format(" && cd ".join(env.working_directory), command)
        ssh = kwargs.pop('conn')
        if not env.quiet:
            logger.debug("'{}' on '{}'".format(command, env.connection_string))
        stdin, stdout, stderr = ssh.exec_command(command)
        # I will defeat the horrible setup for logging I have implemented.
        # Give me time.
        stdout = [a.decode('utf-8').strip() for a in stdout.read().splitlines()]
        stderr = ["ERROR: [{}] '{}'".format(env.connection_string, a.decode('utf-8').strip()) for a in stderr.read().splitlines()]
        if not stderr:
            if not env.quiet:
                for a in stdout:
                    logging.info("[{}] - {}".format(env.connection_string, a))
            return "\n".join(stdout)
        if not env.quiet:
            logging.warning("\n".join(stderr))
            # NOTE(review): run("env") recurses over SSH; if that call also
            # produces stderr this recurses again — confirm acceptable.
            logging.warning("ENV_DEBUG: '{}'".format(run("env")))
        if not env.warn_only:
            sys.exit(1)
        return False
    return run_command(command=command)
# Putting a file is handy. I may decide to check and see if there is already
# an identical file in place so that we don't copy the same file over and over
# again. Hmmmm....
def put(local_path, remote_path):
    """Upload *local_path* to *remote_path* on the remote host via SFTP."""
    @ssh_connection
    def put_file(*args, **kwargs):
        connection = kwargs['conn']
        src = kwargs['local_path']
        dst = kwargs['remote_path']
        logger.debug("Uploading {} to {}:{}".format(
            src, env.connection_string, dst))
        sftp = connection.open_sftp()
        sftp.put(src, dst)
        sftp.close()
    return put_file(local_path=local_path, remote_path=remote_path)
# Getting a file is nifty.
def get(remote_path, local_path=None):
    """Download *remote_path* from the remote host via SFTP.

    If local_path is omitted, the file is saved under its own basename in
    the current working directory. (Previously the default was computed
    AFTER the transfer — and by joining onto None, which raised
    TypeError — so the default never worked.)
    """
    if not local_path:
        local_path = os.path.split(remote_path)[-1]

    @ssh_connection
    def get_file(*args, **kwargs):
        ssh = kwargs['conn']
        remote_path = kwargs['remote_path']
        local_path = kwargs['local_path']
        logger.debug("Downloading {}:{}. Placing it: {}".format(
            env.connection_string, remote_path, local_path))
        ftp = ssh.open_sftp()
        ftp.get(remote_path, local_path)
        ftp.close()
    return get_file(remote_path=remote_path, local_path=local_path)
|
ffctrl.py | from __future__ import print_function
import json
from queue import Queue
import socket
import threading
# Python 3 compatibility
try:
input = raw_input
except NameError:
pass
class FirefoxRemoteControl(object):
    ''' Interact with a web browser running the Remote Control extension. '''

    def __init__(self, port):
        # Plain TCP connection to the extension listening on localhost.
        self.sock = socket.socket()
        self.sock.connect(('localhost', port))

    def execute(self, cmd):
        ''' Send a one-line command and return the decoded JSON result.

        Raises Exception when the extension reports an error.
        '''
        msg = cmd.replace('\n', ' ') + '\r\n'
        # sendall: socket.send() may transmit only part of the buffer.
        self.sock.sendall(msg.encode('utf8'))
        ret = []
        while True:
            chunk = self.sock.recv(4096)
            if not chunk:
                # Peer closed before a full line arrived; previously this
                # looped forever on the empty reads.
                break
            ret.append(chunk)
            if b'\n' in chunk:
                break
        res = json.loads(b''.join(ret).decode('utf8'))
        if 'error' in res:
            raise Exception(res['error'])
        elif not res:
            return None
        else:
            return res['result']
class FirefoxDebuggerControl(object):
    ''' Interact with a Firefox browser with remote debugging enabled.
    Requires the about:config options "devtools.debugger.remote-enabled" and "devtools.chrome.enabled" enabled,
    and the browser to be started with "--no-remote --start-debugger-server <port>"
    '''
    def __init__(self, port):
        # Connect to the debugger server; it sends a greeting message first.
        self.sock = socket.socket()
        self.sock.connect(('localhost', port))
        info = self._recv_msg()  # NOTE(review): greeting read but unused — confirm needed only to drain it
        # Per-actor queues of received messages, filled by the reader thread.
        self.actors = {}
        self.thread = threading.Thread(target=self._receive_thread)
        self.thread.daemon = True
        self.thread.start()
        tab_list = self._send_recv('root', 'listTabs')
        pages = tab_list['tabs']
        if len(pages) == 0:
            raise Exception("No pages to attach to!")
        elif len(pages) == 1:
            page = pages[0]
        else:
            # Multiple tabs open: ask the user which one to attach to.
            print("Select a page to attach to:")
            for i, page in enumerate(pages):
                title = self._send_recv(page['actor'], 'getTarget')['frame']['title']
                # Escape non-printable characters in the title for display.
                title = title.encode('unicode_escape').decode('iso-8859-1')
                if len(title) > 100:
                    title = title[:100] + '...'
                print("%d) %s" % (i+1, title))
            while 1:
                try:
                    pageidx = int(input("Selection? "))
                    page = pages[pageidx-1]
                    break
                except Exception as e:
                    print("Invalid selection:", e)
        page = self._send_recv(page['actor'], 'getTarget')['frame']
        self.page = page
        self._send_recv(page['actor'], 'attach')
    def _actor_msgs(self, actor):
        # Infinite generator over messages queued for *actor* (blocks on get).
        while True:
            yield self.actors[actor].get()
    def _recv_msg(self):
        # Wire format is "<decimal length>:<json payload>".
        # Read the length prefix (at most 10 digits) up to the ':'.
        msgsz = b''
        for i in range(10):
            c = self.sock.recv(1)
            if not c:
                raise EOFError()
            if c == b':':
                break
            msgsz += c
        if not msgsz.isdigit():
            raise ValueError("invalid length field: %s" % msgsz)
        msgsz = int(msgsz)
        # Accumulate exactly msgsz payload bytes.
        msg = bytearray()
        while len(msg) < msgsz:
            chunk = self.sock.recv(msgsz - len(msg))
            if not chunk:
                raise EOFError()
            msg += chunk
        return json.loads(bytes(msg))
    def _send_msg(self, actor, msgtype, obj=None):
        # Ensure a receive queue exists before any reply can arrive.
        if actor not in self.actors:
            self.actors[actor] = Queue()
        if obj is None:
            obj = {}
        else:
            obj = obj.copy()
        obj['to'] = actor
        obj['type'] = msgtype
        msg = json.dumps(obj).encode()
        self.sock.send(str(len(msg)).encode() + b':')
        self.sock.send(msg)
    def _send_recv(self, actor, msgtype, obj=None):
        # Send a request and block for the next message from that actor.
        self._send_msg(actor, msgtype, obj)
        reply = next(self._actor_msgs(actor))
        if 'error' in reply:
            raise Exception(reply['error'], reply.get('message', ''))
        return reply
    def _receive_thread(self):
        ''' Continually read events and command results '''
        while 1:
            try:
                msg = self._recv_msg()
                # NOTE(review): a message from an actor we never sent to
                # raises KeyError here and ends the thread — confirm.
                self.actors[msg['from']].put(msg)
            except Exception as e:
                print("disconnect: %s" % e)
                break
    def execute(self, cmd):
        ''' Evaluate *cmd* as JS in the attached page's console; return the result. '''
        resp = self._send_recv(self.page['consoleActor'], 'evaluateJSAsync', {'text': cmd})
        resultID = resp['resultID']
        # NOTE(review): returns the first console message regardless of
        # whether its resultID matches resp['resultID'] — confirm ordering.
        for result in self._actor_msgs(self.page['consoleActor']):
            if result['hasException']:
                raise Exception(result['exceptionMessage'])
            return result['result']
|
tractrac.py | #!/usr/bin/env python
#%%
#==============================================================================
#==============================================================================
#==============================================================================
#usage: tractrac.py [-h] [-f FILE] [-tf TFILE] [-mmf MOTIONMODELFILE] [-a]
# [-o OUTPUT] [-opp] [-s] [-p PLOT] [-sp] [-cmin CMIN]
# [-cmax CMAX] [-ca CALPHA] [-par PARALLEL]
#
#TRACTRAC v2.01 (16/04/2019) - Joris Heyman
#
#optional arguments:
# -h, --help show this help message and exit
# -f FILE, --file FILE Video Filename to track
# -tf TFILE, --tfile TFILE
# Time of frame file
# -mmf MOTIONMODELFILE, --motionmodelfile MOTIONMODELFILE
# Motion Model file
# -a, --averages Save average velocity maps
# -o OUTPUT, --output OUTPUT
# Save tracking results in a file ASCII (1) or HDF5 (2)
# -opp, --outputpp Save Post Processing results in a file
# -s, --silent No tracking infos
# -p PLOT, --plot PLOT Live plot of tracking results
# -sp, --saveplot Save plots in image sequence
# -cmin CMIN, --cmin CMIN
# Minimum velocity (px/frame) for plotting
# -cmax CMAX, --cmax CMAX
# Maximum velocity (px/frame) for plotting
# -ca CALPHA, --calpha CALPHA
# Alpha value for arrows
# -par PARALLEL, --parallel PARALLEL
# Visualization in a Parallel Thread
#==============================================================================
# # Get Help :
# # >> python tractrac.py --help
#==============================================================================
#==============================================================================
# # Example runs
# # Default video
# # >> python tractrac.py -a
# # >> python tractrac.py -p 1
# # WebCam
# # >> python tractrac.py -f '0' -p 1 -cmax=50
# # Other video file
# # >> python tractrac.py -f '../Sample_videos/videotest.avi' -p 1
# # Other image sequence
# # >> python tractrac.py -f '../Sample_videos/PIVChallenge/*.tif' -a -p 2
# # >> python tractrac.py -f '../Sample_videos/RiverDrone/*.tif' -a -o 1-p 2
# # Type
#==============================================================================
#=============================================
# TracTrac release identifier; embedded in generated parameter files.
# (A `global` statement at module level is a no-op but is kept as-is.)
global version
version= '2.02 (20/04/2019)'
#==============================================================================
#v2.0 __________________________________
# Fast Nearest Neighboor Search Integration (via scipy.spatial.DTree). No more paralelization windows are needed.
#version= '1.5 (03/01/2017)'
#%matplotlib auto
# Notes of versions
# v1.5 _________________________________________________________________________________
# Possibility to execute tractrac both as a module (import tractrac) or directly in bash (python tractrac.py)
# Integration of non-constant times step
# Possibility to read an "interpolated" motion model image of U and V
# Parameter file now called *_par.txt
# Fixed some differences between the Matlab and Python versions; some differences remain, notably in the error thresholds
import time
import glob
import numpy as np
import numpy.matlib
import scipy.spatial.distance
import scipy.spatial as scp
import cv2
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
#import pdb
from parse import *
import sys
import os,os.path
import multiprocessing as mp
#from matplotlib.patches import Circle
#from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
import imutils # To check opencv versions
from scipy.interpolate import griddata
import os,os.path
import argparse
import h5py # For saving binary format
#from tractrac_toolbox import *
#def run(**kwargs):
#% Tractrac Toolbox
def Propagate_MotionModel_KdTree(C,Xm,Um,Em,Xm_old,Um_old,Em_old,th):
    """Propagate the motion model onto new detection points C.

    C: (N,2) query positions. Xm/Um/Em: current model positions,
    velocities and errors; Xm_old/Um_old/Em_old: model points kept from
    the previous frame. th: parameter list-of-dict (uses 'motion_av' and
    'filter_time'). Returns (U_filt, E_filt, Xm_old, Um_old, Em_old):
    the filtered velocity/error at each point of C plus the updated
    "old" model state for the next frame.
    """
    # KdTree
    # Stack current and previous model points into one reference set.
    Xref=np.vstack((Xm,Xm_old))
    Uref=np.vstack((Um,Um_old))
    Eref=np.hstack((Em,Em_old))
    #Make Tree
    tXref=scp.cKDTree(Xref)
    # Get firsts nn'th neighboors of query points
    # NOTE(review): np.int8 overflows for motion_av > 127 — confirm range.
    nn=np.minimum(np.minimum(np.int8(th[0]['motion_av']),C.shape[0]),Xref.shape[0])
    #print nn
    distance,neighboors=tXref.query(C,k=nn)
    # Make matrices and compute local average
    neighboors=np.array(neighboors)
    values_U=Uref[neighboors.flatten(),:].reshape(-1,nn,2)
    values_E=Eref[neighboors.flatten()].reshape(-1,nn)
    # Averaging over neighboors values
    # Option 1 : simple average of neighbooring values
    #	U_filt=np.nanmean(values_U,axis=1).reshape(-1,2)
    #	E_filt=np.nanmean(values_E,axis=1).reshape(-1)
    # Option 2 : weighted average with weight on previous model
    # Previous-frame points are down-weighted by filter_time.
    W=np.ones(Xref.shape[0])
    #W[:Xm.shape[0]]=W[:Xm.shape[0]]*np.maximum(0,-Em)
    W[-Xm_old.shape[0]:]=W[-Xm_old.shape[0]:]*th[0]['filter_time']#*np.maximum(0,-Em_old)
    Wneighboors=W[neighboors.flatten()].reshape(-1,nn)
    U_filt=np.sum(values_U*Wneighboors.reshape(Wneighboors.shape[0],Wneighboors.shape[1],-1),axis=1).reshape(-1,2)/np.sum(Wneighboors,axis=1).reshape(Wneighboors.shape[0],-1)
    E_filt=np.sum(values_E*Wneighboors,axis=1)/np.sum(Wneighboors,axis=1)
    # Option 2 : weighted average with weight on previous model + distance + error
    # Find nan and replace by default 0 value
    idgood=np.isfinite(U_filt[:,0])
    # Save non nan to ponderate next iteration
    Xm_old=C[idgood,:]
    Um_old=U_filt[idgood,:]
    Em_old=E_filt[idgood]
    # Replace nan if no model points where given
    if len(U_filt)>0:
        idnan=np.isnan(U_filt[:,0])
        U_filt[idnan,:]=0
        E_filt[np.isnan(E_filt)]=2
    return U_filt,E_filt,Xm_old,Um_old,Em_old
#%
# Initialize plot window and cbar
def init_plot(w,h):
    """Create the figure, axes, quiver, colorbar and background image used
    by plot(); all are stored in module globals. w, h: frame size in px."""
    global fig,ax,qui_c,Cmin,Cmax,alpha,img,qui
    size=10.0
    # Preserve the frame's aspect ratio at a fixed height of `size` inches.
    fig=plt.figure(figsize=(float(w)/float(h)*size,size))
    ax=fig.add_subplot(111)
    plt.show(block=False)
    # Start with an empty quiver; it is replaced on each plot() call.
    qui=plt.quiver([],[],[],[], [], cmap = cm.rainbow, headlength=7,alpha=0.5)
    plt.clim(Cmin,Cmax)
    qui_c=plt.colorbar(qui,orientation='vertical', ax=ax,label='Velocity [px/frame]')
    img=plt.imshow(np.zeros((h,w)),cmap='gray',clim=[0,1],zorder=-1,interpolation='nearest')
    # y axis inverted so image coordinates read top-down.
    ax.axis([0,w,h,0])
def plot(q):
    """Render one frame update: background image, unit-length quiver of
    tracked velocities (colored by col) and a status title; optionally
    saves the figure when SAVE_PLOT is set.

    q: [image, points, vectors, col, n, err] as produced by the tracker.
    """
    global fig,ax,qui_c,Cmin,Cmax,alpha,th,SAVE_PLOT,plot_folder,img,qui
    image,points,vectors,col,n,err = q
    img.set_data(image)
    if len(vectors)>0:
        # Normalize velocity vectors to unit length for display only.
        norm=np.sqrt(vectors[:,0]**2.+vectors[:,1]**2)
        norm[norm==0]=1 # To avoid division problem
        vectors[:,1]=vectors[:,1]/norm
        vectors[:,0]=vectors[:,0]/norm
    # vth=0.
    # idquiv=np.where(norm>vth)[0]
    # idscat=np.where(norm<vth)[0]
    #ax.cla()
    # Print Image
    # Print Scatter
    #sca=plt.scatter(points[:,0],points[:,1],s=th[0]['peak_conv_size']*20,c=col,alpha=alpha, cmap = cm.rainbow,vmin=Cmin,vmax=Cmax)
    # Print Quiver
    # angle=np.arctan2(vectors[:,0],vectors[:,1])*180/3.14
    # for i in range(len(col)):
    # plt.scatter(points[i,0],points[i,1],s=th[0]['peak_conv_size']*20,c=col[i],marker=(3,0,angle[i]),alpha=alpha, cmap = cm.rainbow,vmin=Cmin,vmax=Cmax)
    # Quver plot
    # The quiver cannot be updated in place, so it is removed and rebuilt.
    if len(vectors)==0:
        qui.remove()
        qui=plt.quiver([],[],[],[], [], cmap = cm.rainbow, headlength=7,alpha=0.5)
    else:
        #qui=plt.quiver(points[idquiv,0],points[idquiv,1],vectors[idquiv,0],-vectors[idquiv,1], col[idquiv], cmap = cm.rainbow, pivot='middle', linewidth=.0,headwidth=1., headaxislength=1.,alpha=alpha); plt.clim(Cmin,Cmax)
        qui.remove()
        qui=plt.quiver(points[:,0],points[:,1],vectors[:,0],-vectors[:,1], col[:], cmap = cm.rainbow, pivot='middle', linewidth=.0,headwidth=1., headaxislength=1.,alpha=alpha); plt.clim(Cmin,Cmax)
    #plt.scatter(points[idscat,0],points[idscat,1],c=col[idscat], s=2., cmap = cm.rainbow, alpha=alpha,edgecolors=None); plt.clim(Cmin,Cmax)
    #ax.invert_yaxis()
    #vectors[:,1]=-vectors[:,1]
    if len(vectors)==0:
        title='TracTrac v2.0 | Frame {:04d} | No object found'.format(n)
    else:
        title='TracTrac v2.0 | Frame {:04d} | Points tracked {:5d} | Mean Err. {:1.4f}px'.format(n,points.shape[0],10**err)
    ax.set_title(title)
    fig.canvas.draw()
    if SAVE_PLOT:
        imname=plot_folder+'img{:04d}.png'.format(n)
        plt.savefig(imname,bbox_inches='tight',dpi=100)
# For plotting in a parallel process
def visualization_worker(q):
    """Consume plot jobs from queue *q* and render each one until a job
    arrives whose stop flag is False (that final job is still drawn)."""
    global fig,ax,qui_c,Cmin,Cmax,alpha,th,plot_folder
    keep_going = True
    while keep_going:
        image, points, vectors, col, n, err, keep_going = q.get()
        plot([image, points, vectors, col, n, err])
def read_parameter_file(filename):
    """Parse a TracTrac parameter file into the th format ([dict]).

    Each line matching "<name> <numeric value>" becomes one dict entry.
    A missing file yields an empty parameter dict after a warning.
    """
    par=[{}]
    if os.path.exists(filename):
        with open(filename) as f:
            for line in f:
                s=search('{varname:w} {varvalue:g}',line)
                # `search` (from the `parse` package) returns None when the
                # line does not match the pattern.
                if s is not None: par[0][str(s['varname'])]=s['varvalue']
        print('Parameter file read !')
    else:
        print('WARNING: no parameter file exists. Taking default values! ')
    return par
def write_parameter_file(filename,th):
    """Write the th[0] parameter dict to *filename* in TracTrac's
    "name value" format, grouped into commented sections.

    Uses a with-block so the file handle is closed even if a write
    fails (the original leaked the handle on error).
    """
    global version
    with open(filename, 'w') as f:
        f.write('# Parameter file generated by TracTrac Python v'+ version+'\n\n')
        f.write('# Video loops \n')
        f.write('vid_loop {}\n\n'.format(th[0]['vid_loop']))
        f.write('# Image Processing\n')
        f.write('ROIxmin {}\n'.format(th[0]['ROIxmin']))
        f.write('ROIymin {}\n'.format(th[0]['ROIymin']))
        f.write('ROIxmax {}\n'.format(th[0]['ROIxmax']))
        f.write('ROIymax {}\n'.format(th[0]['ROIymax']))
        f.write('BG {}\n'.format(th[0]['BG']))
        f.write('BGspeed {}\n'.format(th[0]['BGspeed']))
        f.write('noise {}\n'.format(th[0]['noise']))
        f.write('noise_size {}\n\n'.format(th[0]['noise_size']))
        f.write('# Object Detection\n')
        f.write('peak_th {}\n'.format(th[0]['peak_th']))
        f.write('peak_th_auto {}\n'.format(th[0]['peak_th_auto']))
        f.write('peak_neigh {}\n'.format(th[0]['peak_neigh']))
        f.write('peak_conv {}\n'.format(th[0]['peak_conv']))
        f.write('peak_conv_size {}\n'.format(th[0]['peak_conv_size']))
        f.write('peak_subpix {}\n'.format(th[0]['peak_subpix']))
        f.write('peak_minima {}\n\n'.format(th[0]['peak_minima']))
        f.write('# Motion Model \n')
        f.write('motion {}\n'.format(th[0]['motion']))
        f.write('motion_av {}\n'.format(th[0]['motion_av']))
        f.write('motion_it {}\n'.format(th[0]['motion_it']))
        f.write('filter {}\n'.format(th[0]['filter']))
        f.write('filter_time {}\n'.format(th[0]['filter_time']))
        f.write('filter_std {}\n'.format(th[0]['filter_std']))
def set_default_parameter(th,w,h):
    """Fill any missing tracking parameters in th[0] with defaults.

    w, h: image width/height, used for the default ROI bounds.
    Mutates th[0] in place and returns th.
    """
    defaults = {
        'ROIxmin': 0, 'ROIxmax': w, 'ROIymin': 0, 'ROIymax': h,
        'BG': 0, 'BGspeed': 0.02,
        'noise': 0, 'noise_size': 3,
        'peak_th': 0.02, 'peak_th_auto': 1, 'peak_neigh': 1,
        'peak_conv_size': 10., 'peak_conv': 1, 'peak_subpix': 1,
        'peak_minima': 0,
        'motion': 1, 'motion_av': 10, 'motion_it': 0,
        'filter': 1, 'filter_time': 1.0, 'filter_std': 1.5,
        'vid_loop': 2,
    }
    for key, value in defaults.items():
        th[0].setdefault(key, value)
    return th
def times_f(a,b):
    """Return the (elementwise) product of a and b."""
    return a * b
def blob_detection(F,th):
    """Convolve image F with the blob-detection kernel selected by
    th[0]['peak_conv'] (0: none, 1: DoG, 2: LoG); the result is
    sign-flipped according to th[0]['peak_minima'] so blobs are maxima."""
    # LoG or DoG detection kernel
    scale=th[0]['peak_conv_size']
    # Kernel size: odd, at least 3, roughly 3*scale.
    size=(int(np.maximum(3,(int(scale*3.)//2)*2+1)),int(np.maximum(3,(int(scale*3.)//2)*2+1)))
    # print(size)
    #print size
    Ff=-F # No kernel
    if th[0]['peak_conv']==1 : # DoG kernel
        Ff = cv2.GaussianBlur(F,size,scale*0.8,cv2.BORDER_REPLICATE)
        Ff = cv2.GaussianBlur(Ff,size,scale*1.2,cv2.BORDER_REPLICATE)-Ff
    if th[0]['peak_conv']==2 : # LoG kernel
        Ff_temp = cv2.GaussianBlur(F,size,scale*1.0,cv2.BORDER_REPLICATE)
        Ff = cv2.Laplacian(Ff_temp,cv2.CV_32F,cv2.BORDER_REPLICATE)
    #print Ff.shape,Ff.max()
    # Flip sign so the peak finder always looks for maxima.
    if th[0]['peak_minima']==0 : Ff=-Ff
    return Ff
def feature_detection(F,th):
    """Detect trackable features in image F.

    peak_conv == 3 uses OpenCV's Shi-Tomasi corner detector; any other
    value runs blob detection (LoG/DoG convolution) followed by the
    local-maxima search. Returns (filtered image, x, y, z).
    """
    if th[0]['peak_conv']==3:
        # OPENCV implementation of Shi and Tomasi
        Ff = F
        corners = cv2.goodFeaturesToTrack(Ff,10000,th[0]['peak_th'],th[0]['peak_conv_size'])
        x = np.array([corner[0, 0] for corner in corners])
        y = np.array([corner[0, 1] for corner in corners])
        z = np.zeros((len(x)))
    else:
        # Blob detection based on Log and Dog kernel followed by peak finder
        Ff = blob_detection(F,th)
        [x,y,z] = maximaThresh(Ff,1+2*int(th[0]['peak_neigh']),th)
    return Ff,x,y,z
def imProj(I,proj):
    """Apply the projective (perspective) transform described by *proj*
    (rows of [src_x, src_y, dst_x, dst_y]) to image I; returns I
    unchanged when proj has fewer than 4 rows."""
    if proj.shape[0]>=4:
        # NOTE(review): the shape[0]<4 arm of this check is unreachable
        # inside the shape[0]>=4 branch — confirm intended validation.
        if (proj.shape[1]!=4)|(proj.shape[0]<4):
            print('ERROR: Bad formating of _proj.txt file. See documentation')
            return I
        # Projective transform if file found
        src_points = np.float32([proj[i,:2] for i in range(proj.shape[0])])
        dst_points = np.float32([proj[i,2:] for i in range(proj.shape[0])])
        projective_matrix = cv2.getPerspectiveTransform(src_points, dst_points)
        # Compute image size
        # Transform the four image corners to find the output bounding box.
        pts_bnd = np.float32([[0,0],[0,I.shape[0]],[I.shape[1],I.shape[0]],[I.shape[1],0]]).reshape(-1,1,2)
        pts_bnd_= cv2.perspectiveTransform(pts_bnd, projective_matrix)
        [xmin, ymin] = np.int32(pts_bnd_.min(axis=0).ravel() - 0.5)
        [xmax, ymax] = np.int32(pts_bnd_.max(axis=0).ravel() + 0.5)
        t = [-xmin,-ymin]
        Ht = np.array([[1,0,t[0]],[0,1,t[1]],[0,0,1]]) # Translate
        I = cv2.warpPerspective(I,Ht.dot(projective_matrix), (xmax-xmin,ymax-ymin))
        # Only square
        #I = cv2.warpPerspective(I, projective_matrix, (int(proj[:,2].max()),int(proj[:,3].max())))
    return I
def imProc(I,th):
    """Default pre-processing: greyscale conversion, float scaling,
    ROI crop and optional median-filter denoising.

    I: uint8 or uint16 image (2-D, or 3-D BGR converted to grey).
    th: parameter list-of-dict as produced by set_default_parameter().
    Returns the processed float32 image.
    """
    # To Greyscale
    if len(I.shape)>2: I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    if I.dtype == 'uint8': Im = np.float32(I)/256. # ALways convert to float Images
    if I.dtype == 'uint16': Im = np.float32(I)/2.**16 # ALways convert to float
    # Crop to the ROI, clamping the bounds to the image extent.
    # BUGFIX: ROIymin previously used np.minimum(0, ...), which forced the
    # top bound to <= 0 and silently ignored any user-supplied ROIymin
    # (the x bounds already used np.maximum correctly).
    ROIymin=np.maximum(0,th[0]['ROIymin'])
    ROIymax=np.minimum(I.shape[0],th[0]['ROIymax'])
    ROIxmin=np.maximum(0,th[0]['ROIxmin'])
    ROIxmax=np.minimum(I.shape[1],th[0]['ROIxmax'])
    Im = Im[ROIymin:ROIymax,ROIxmin:ROIxmax]
    #Filtering
    if th[0]['noise'] == 1: Im = cv2.medianBlur(Im,th[0]['noise_size'])
    #Im = cv2.GaussianBlur(Im,(5,5),2)
    return Im
def distanceMat(x,y):
    """Pairwise Euclidean distance matrix between point sets x and y."""
    return scipy.spatial.distance.cdist(x, y)
def cropPoints(x,y,ROI):
    """Indices of points strictly inside ROI = [[xmin,ymin],[xmax,ymax]]."""
    inside = (x > ROI[0, 0]) & (x < ROI[1, 0]) & (y > ROI[0, 1]) & (y < ROI[1, 1])
    return np.where(inside)[0]
def cropPoints3d(x,y,z,ROI):
    """Indices of 3-D points strictly inside the box
    ROI = [[xmin,ymin,zmin],[xmax,ymax,zmax]]."""
    inside = ((x > ROI[0, 0]) & (x < ROI[1, 0])
              & (y > ROI[0, 1]) & (y < ROI[1, 1])
              & (z > ROI[0, 2]) & (z < ROI[1, 2]))
    return np.where(inside)[0]
def initialize_ROI(size,n,nbnd):
    """Split a 3-D domain of extent *size* into n[0]*n[1]*n[2] tiles.

    Returns (ROIbnd, ROI): per-tile bounds with and without the overlap
    margin *nbnd*, each as a [[min_x,min_y,min_z],[max_x,max_y,max_z]]
    numpy array, ordered j (x-tiles) outermost, then i, then k.
    """
    ROIbnd = []
    ROI = []
    for j in range(n[0]):
        for i in range(n[1]):
            for k in range(n[2]):
                lo = [j*size[0]/n[0], i*size[1]/n[1], k*size[2]/n[2]]
                hi = [(j+1)*size[0]/n[0], (i+1)*size[1]/n[1], (k+1)*size[2]/n[2]]
                ROIbnd.append(np.array([
                    [lo[0]-nbnd[0], lo[1]-nbnd[1], lo[2]-nbnd[2]],
                    [hi[0]+nbnd[0], hi[1]+nbnd[1], hi[2]+nbnd[2]]]))
                ROI.append(np.array([lo, hi]))
    return ROIbnd, ROI
def imadjust(I):
    """Linearly rescale image intensities to span [0, 1].

    A constant image (max == min) is returned as all zeros instead of
    dividing by zero (which produced NaN/inf plus a runtime warning).
    """
    lo = np.min(I)
    hi = np.max(I)
    if hi == lo:
        return np.zeros_like(np.asarray(I, dtype=float))
    return (I - lo) / (hi - lo)
def maximaThresh(a,n,th):
    """Find local maxima of image *a* over n x n neighborhoods above
    th[0]['peak_th'], with optional sub-pixel refinement.

    Returns (x, y, z): sub-pixel peak coordinates and peak values.
    """
    # Find n*n local max
    method=th[0]['peak_subpix']
    a=np.float64(a)
    if a.max()<th[0]['peak_th']:
        print('Warning : peak_th ({:1.4f}) is above the maximum image convoluted value ({:1.4f}).'.format(th[0]['peak_th'],a.max()))
    # Tiny random perturbation breaks ties between equal neighbors.
    # NOTE(review): unseeded randomness makes results non-deterministic at
    # exact-tie pixels — confirm acceptable.
    r=np.random.rand(np.shape(a)[0],np.shape(a)[1])*1e-5
    mask=np.ones((n,n),np.uint8)
    # Grayscale dilation gives each pixel its neighborhood maximum;
    # a pixel equal to its dilation is a local maximum.
    b=cv2.dilate(a+r,mask,iterations = 1)
    c=((a+r==b)&(a>th[0]['peak_th']))
    [y,x]=np.where(c)
    # Remove points on border
    w=np.shape(a)[1]
    h=np.shape(a)[0]
    nb=np.floor(n/2.)
    idbnd=np.where((y<h-nb)&(y>=nb)&(x<w-nb)&(x>=nb))
    x=np.array(np.float64(x[idbnd])).reshape(-1)
    y=np.array(np.float64(y[idbnd])).reshape(-1)
    # Subpixel Refinement Method
    # 2nd order poly fit on the logarithm of diag of a
    Dx=np.zeros(x.shape);Dy=np.zeros(x.shape);z=np.zeros(x.shape);
    if method==1: [Dx,Dy,z]=subpix2nd(np.real(np.log(a-np.min(a)+1e-8)),x,y,n) #Gaussian
    if method==0: [Dx,Dy,z]=subpix2nd(a,x,y,n) # Quadratic
    # Take only peaks that moved less than 0.5
    idgood=np.where((np.abs(Dx)<0.5)&(np.abs(Dy)<0.5))
    #print x,y,Dx,Dy
    x=x[idgood]+Dx[idgood]
    y=y[idgood]+Dy[idgood]
    z=z[idgood]
    return x,y,z
def subpix2nd(a,x,y,n):
    """Sub-pixel peak refinement by 1-D 2nd-order polynomial fits.

    For each integer peak (x[i], y[i]) a horizontal and a vertical pencil
    of length ~n centered on the peak is sampled from image `a`; the
    extremum of the parabola fitted to each pencil gives the sub-pixel
    offsets (Dx, Dy). Z is the sampled value at the pencil center.
    """
    #% Subpixel approximation of a 2nd order polynomial with a pencil of length
    #% np
    npen=np.floor(n/2.)
    pencil=np.arange(-npen,npen+1)
    # np.tile replaces the deprecated np.matlib.repmat (numpy.matlib is
    # deprecated); the result is identical.
    X=np.tile(pencil,(np.size(x),1))
    YH=np.zeros(X.shape)
    YV=np.zeros(X.shape)
    n=np.float32(len(pencil))
    for i in range(0,len(pencil)):
        # Clamp pencil samples to the image so border peaks never index
        # out of range.
        idV=sub2ind(np.shape(a),np.maximum(0,np.minimum(a.shape[0]-1,y+pencil[i])),x)
        idH=sub2ind(np.shape(a),y,np.maximum(0,np.minimum(a.shape[1]-1,x+pencil[i])))
        YV[:,i]=a.flat[idV]
        YH[:,i]=a.flat[idH]
    # 2nd order poly a+bx+cx^2=0 : closed-form least-squares coefficients
    s2=np.sum(pencil**2.)
    s4=np.sum(pencil**4.)
    bH=np.sum(YH*X,1)/s2
    cH=-(-s2*np.sum(YH,1)+n*np.sum(X**2.*YH,1))/(s2**2.-s4*n)
    bV=np.sum(YV*X,1)/s2
    cV=-(-s2*np.sum(YV,1)+n*np.sum(X**2.*YV,1))/(s2**2.-s4*n)
    # Avoid division by zero for perfectly flat pencils.
    cH[cH==0]=1e-8
    cV[cV==0]=1e-8
    # Parabola extremum on the horizontal and vertical axis
    dH=-bH/cH/2.
    dV=-bV/cV/2.
    Dx=dH
    Dy=dV
    Z=YH[:,int((n-1)/2.)]
    return Dx,Dy,Z
def is_image(f):
    """Return 1 if filename `f` ends with a known image extension, else 0.

    Matching is case-insensitive (the original only accepted lowercase
    extensions plus 'TIF', so e.g. 'a.PNG' was wrongly rejected) and no
    longer requires len(f) >= 4, so a bare 'tif' suffix is accepted too.
    All names accepted before are still accepted.
    """
    flag_im = 0
    if f.lower().endswith(('tif', 'tiff', 'png', 'jpg', 'jpeg')):
        flag_im = 1
    return flag_im
def sub2ind(array_shape, rows, cols):
    """Row-major (C order) conversion of (row, col) subscripts to flat uint32 indices."""
    width = array_shape[1]
    return np.uint32(rows * width + cols)
##############################################
# Function to initiate a TracTrac computation inside a Python script.
#
# Optional input arguments are :
# f=videofilename (string) : Pathname of the video file to analyse.
# tf=time_filename (string) : Pathname of the time stamp of video frames.
# p=plotting_option (0,1,2,3): for no plot, normal plot, motion model plot, or motion model error plot.
# o=output_file (0,1,2) : (1) save the tracking results in a ASCII file of name videofilename_track.txt or (2) in a binary hdf5 file videofilename_track.hdf5.
# s=silent (0,1) : verbose or silent computation.
# sp=save_plot (0,1) : Save images of each plots.
# clim=(float,float) : caxis color limits for plotting.
# alpha=float : Transparency of plotting
#
# Output arguments are:
# Pts (np.array) : Result array of tracklets. Columns are in order : 0 Frame, 1 Track ID, 2 X, 3 Y, 4 U, 5 V, 6 X Acceleration, 7 Y Acceleration, 8 U motion model, 9 V motion model, 10 Motion model error
# th (list of dict) : Tracking parameters
##############################################
#%%
def run(**kwargs):
    """Entry point to launch a TracTrac computation from Python code.

    Keyword arguments (all optional): f (video filename), tf (frame time
    stamp file), mmf (motion model file), p (plot level 0-3), o (output:
    0 none, 1 ASCII, 2 HDF5), s (verbose flag), sp (save plot images),
    clim ((min, max) color limits), calpha (plot transparency), th (list
    holding one parameter dict; empty dict means read/derive defaults),
    par (parallel visualization flag).

    Returns (Pts, th): tracklet array and the parameter list actually used.

    NOTE(review): mirrors options into module globals used by the plotting
    helpers; AVERAGES/OUTPUT_PP are only assigned in the __main__ block, so
    calling run() from an import may hit an undefined global inside
    tractrac() -- confirm.
    """
    global SAVE_PLOT,Cmin,Cmax,alpha,INFO,PAR,plot_folder
    # Take arguments or Default values
    filename=kwargs.get('f','../Sample_videos/videotest.avi') # Default Sample Video
    tfile=kwargs.get('tf','') # File with time stamp of video frames
    mmfilename=kwargs.get('mmf','')
    PLOT=kwargs.get('p',0)
    OUTPUT=kwargs.get('o',0)
    INFO=kwargs.get('s',0)
    SAVE_PLOT=kwargs.get('sp',0)
    Cmin,Cmax=kwargs.get('clim',(0.0,5.0))
    alpha=kwargs.get('calpha',0.5)
    th=kwargs.get('th',[{}])
    PAR=kwargs.get('par',0)
    Pts,th=tractrac(filename,th,mmfilename,tfile,PLOT,OUTPUT)
    return Pts,th
# Main tracking FUNCTION
def tractrac(filename,th,mmfilename,tfile,PLOT,OUTPUT):
    """Track objects across the frames of a video, image sequence or webcam.

    Args:
        filename: video file path, glob pattern of an image sequence, or '0'
            for the webcam.
        th: list holding a single parameter dict; if empty it is read from
            the per-video parameter file.
        mmfilename: optional motion-model ASCII file (x y u v [err] rows).
        tfile: optional frame time-stamp file (one stamp per frame).
        PLOT: 0 no plot, 1 velocity, 2 motion model, 3 motion model error.
        OUTPUT: 0 none, 1 save ASCII txt, 2 save HDF5.

    Returns:
        (Pts, th): list of per-frame tracklet row arrays (concatenated to a
        single array when OUTPUT>0 saving runs) and the parameter list used.

    NOTE(review): relies on module globals (INFO, PAR, AVERAGES, version,
    SAVE_PLOT, ...) set by run() or the __main__ block; AVERAGES is only
    defined when executed as a script -- confirm before library use.
    """
    global SAVE_PLOT,Cmin,Cmax,alpha,INFO,PAR,version,plot_folder
    nchar=43
    sep="="*nchar
    if INFO:
        print(sep)
        print("|"+ ' TRACTRAC v'+version+' - Heyman J. '+ " |")
        print(sep)
        print("> OpenCV Version: {}".format(cv2.__version__)) # Check OpenCV version
        print('> file : '+filename)
        print(sep)
    # Read Video Stream or image sequence
    flag_im=is_image(filename)
    flag_web=0 # flag if videosource is webcam
    path,name=os.path.split(filename)
    # Read Projective transform file if it exists in the folder
    if os.path.isfile(path+'/projection.txt'):
        proj=np.loadtxt(path+'/projection.txt')
    else:
        proj=np.array([])
    if flag_im: # Image list
        flist=sorted(glob.glob(filename))
        I0=imProj(cv2.imread(flist[0],2),proj)
        nFrames=len(flist)
        height,width=I0.shape[:2]
    elif filename=='0': # WebCam
        flag_web=1
        cv2.destroyAllWindows()
        cap = cv2.VideoCapture(0)
        nFrames=10000
        I0=cap.read()[1]
        I0=imProj(I0,proj)
        height,width=I0.shape[:2]
    else: # Video
        cv2.destroyAllWindows()
        cap = cv2.VideoCapture(filename)
        I0=cap.read()[1]
        I0=imProj(I0,proj)
        height,width=I0.shape[:2]
        if imutils.is_cv2():
            cap.set(cv2.cv.CAP_PROP_POS_FRAMES,0) # Rewind
            nFrames=int(cap.get(cv2.cv.CAP_PROP_FRAME_COUNT))
        elif imutils.is_cv3() or imutils.is_cv4() :
            cap.set(cv2.CAP_PROP_POS_FRAMES,0)# Rewind
            nFrames=int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        else:
            print('Bad OpenCV version. Please install opencv3')
            sys.exit()
    if nFrames==0:
        print('No Frames to track. Check filename.')
        sys.exit()
    # Read Parameters or set to default values
    if len(path)==0:
        path='./'
    if flag_im:
        parameter_filename=path+'/' + name[-3:]+'seq_par.txt' # If list of image, default name different
    else:
        parameter_filename=path+'/' + name[:-4]+'_par.txt'
    if not th[0]: th = read_parameter_file(parameter_filename)
    # Set remaining Parameters and Save
    th = set_default_parameter(th,width,height)
    write_parameter_file(parameter_filename,th)
    # Convert to int several par (default is float)
    th[0]['ROIxmin']=np.maximum(int(th[0]['ROIxmin'])-1,0)
    th[0]['ROIxmax']=int(th[0]['ROIxmax'])
    th[0]['ROIymin']=np.maximum(int(th[0]['ROIymin'])-1,0)
    th[0]['ROIymax']=int(th[0]['ROIymax'])
    th[0]['noise_size']=int(th[0]['noise_size'])
    th[0]['peak_neigh']=int(th[0]['peak_neigh'])
    if INFO:
        print(th)
        print("="*(65))
    if SAVE_PLOT:
        plot_folder=path+'/'+name[:-4]+'_img/'
        if INFO:
            print('Images will be saved in '+plot_folder)
        if not(os.path.exists(plot_folder)): os.makedirs(plot_folder);
    # Read time file to get per-frame time steps (defaults to unit steps)
    dt=np.ones(nFrames-1)
    if os.path.isfile(tfile):
        Time_stamp=np.loadtxt(tfile)
        if Time_stamp.shape[0]==nFrames:
            dt=np.diff(Time_stamp)
    Pts=[]
    # Read 2 first frames and initialize
    if flag_web:
        time0=time.time()
    I0 = cv2.imread(flist[0],2) if flag_im else cap.read()[1];
    I0=imProj(I0,proj)
    I0f=imProc(I0,th)
    if flag_web:
        time1=time.time()
        dt[0]=time1-time0
        time0=time1
    I1 = cv2.imread(flist[1],2) if flag_im else cap.read()[1];
    I1=imProj(I1,proj)
    I1f=imProc(I1,th)
    # Read Mask file if any (multiplicative mask on processed frames)
    mask_file=path+'/' + name[:-4]+'_mask.tif'
    if os.path.isfile(mask_file):
        mask=cv2.imread(mask_file,2)
        mask=imProj(mask,proj)
        mask=imProc(mask,th)
    else:
        mask=np.ones(I1f.shape,dtype='float32')
    h,w=np.shape(I0f)
    # Initialize Background
    B=np.zeros((h,w),dtype=np.float32)
    if (th[0]['BG']==1)&(flag_web==0):
        nbgstart=np.minimum(100,nFrames)
        print('Initiating Background image over the firsts {:d} frames...'.format(nbgstart))
        for i in range(nbgstart):
            I = cv2.imread(flist[i],2) if flag_im else cap.read()[1];
            I=imProj(I,proj)
            I=imProc(I,th)
            B=B+I/nbgstart
        # Rewind video after consuming frames for the background estimate
        if (imutils.is_cv2()) & (not flag_im):
            cap.set(cv2.cv.CAP_PROP_POS_FRAMES,0);
            cap.read()
            cap.read()
        elif (imutils.is_cv3()) & (not flag_im):
            cap.set(cv2.CAP_PROP_POS_FRAMES,0);
            cap.read()
            cap.read()
    # Initialize Plot (and, if PAR, a parallel visualization worker process)
    if PLOT>0:
        init_plot(I0f.shape[1],I0f.shape[0])
        if PAR:
            queue = mp.Queue()
            p = mp.Process(target=visualization_worker, args=(queue,))
            p.start()
    # Initialize Foreground (masked frame minus background)
    F0=I0f*mask-B
    F1=I1f*mask-B
    # Get peaks and initialize centre, velocity and acceleration vectors
    F0f,x,y,z=feature_detection(F0,th)
    C0=np.transpose(np.array([x,y]))
    U0=np.zeros(C0.shape)
    A0=np.zeros(C0.shape)
    ID0=np.zeros(C0.shape[0])+np.nan # Vector of trajectory ids (nan = unassigned)
    F1f,x,y,z=feature_detection(F1,th)
    C1=np.transpose(np.array([x,y]))
    U1=np.zeros(C1.shape)
    A1=np.zeros(C1.shape)
    # Replace threshold if auto and not Shi-Tomasi convolution
    if (th[0]['peak_th_auto']==1)&~(th[0]['peak_conv']==3):
        th[0]['peak_th']=np.mean(F1f)+0.5*np.std(F1f)
    errU_filt=np.zeros(C1.shape[0])
    id_traj=0
    # Initialize lists
    idgood=[]
    errU = np.array([])
    errU_th=2.
    Xmot, Umot,Xmot_temp,Umot_temp = [np.array([]).reshape((-1,2)) for j in range(4)]
    errmot, errmot_temp = [np.array([]) for j in range(2)]
    # Initialize Point Tree structure (KD-trees over detected centers)
    t0,t1=None,None
    if len(C0)>0: t0=scp.cKDTree(C0)
    if len(C1)>0: t1=scp.cKDTree(C1)
    ns01,ns10,i1001=[np.array([]) for i in range(3)]
    if (len(C0)>0)&(len(C1)>0):
        ns01=t0.query(C1)
        ns10=t1.query(C0)
        # Reversibility check of nearest-neighbor links 0 <-> 1
        i1001=ns10[1][ns01[1]]==np.arange(C1.shape[0])
    # Check if a MotionModel file was provided
    MotionModelFile=False
    if os.path.exists(mmfilename):
        mmdata = np.loadtxt(mmfilename,delimiter=' ')
        # Interpolation on a grid made at the image size
        mm_x, mm_y = np.meshgrid( range(I0.shape[1]),range(I0.shape[0]))
        mm_U = griddata((mmdata[:,0],mmdata[:,1]), mmdata[:,2], (mm_x, mm_y), method='nearest')
        mm_V = griddata((mmdata[:,0],mmdata[:,1]), mmdata[:,3], (mm_x, mm_y), method='nearest')
        if mmdata.shape[1]>4:
            mm_E = griddata((mmdata[:,0],mmdata[:,1]), mmdata[:,4], (mm_x, mm_y), method='nearest')
        else:
            mm_E = griddata((mmdata[:,0],mmdata[:,1]), np.zeros(mmdata[:,1].shape), (mm_x, mm_y), method='nearest')
        MotionModelFile=True
        if INFO:
            print('! The Provided Motion Model File will be used !')
    # Initialize averages
    if AVERAGES:
        Av_U=np.zeros(F0f.shape)
        Av_V=np.zeros(F0f.shape)
        Av_N=np.zeros(F0f.shape)
    #====================Top Down Approach=========================================
    # Build the frame visiting order: vid_loop extra forward/backward sweeps
    # are appended so the motion model converges before the final pass.
    N=np.arange(nFrames)
    if ~flag_web:
        DT=dt
        for k in range(int(th[0]['vid_loop'])):
            if np.mod(k,2)==0:
                N=np.hstack((N,np.arange(nFrames-2,-1,-1))) # Vector of consecutive frames to read
                DT=np.hstack((DT,-dt[::-1])) # Vector of consecutive times
            else:
                N=np.hstack((N,np.arange(1,nFrames))) # Vector of consecutive frames to read
                DT=np.hstack((DT,dt[::1])) # Vector of consecutive times
        dt=DT
    #==============================================================================
    # MAIN LOOP OVER VIDEO FRAMES #################################################
    if INFO: print('0001 | Buffer frame...')
    for i in range(2,len(N)):
        t = time.time()
        # PRE processing steps #################################################
        if flag_im:
            I2 = cv2.imread(flist[N[i]],2)
        elif flag_web:
            I2 = cap.read()[1];
        else:
            cap.set(1,N[i])
            I2 = cap.read()[1];
        if I2 is None:
            if PAR and PLOT: p.join();
            print('WARNING: Video reader get None type. Exiting at frame {:d}.'.format(N[i]))
            break
        if flag_web:
            time1=time.time()
            dt[i-1]=time1-time0
            time0=time1
            print('Webcam Framerate: {:1.1f} fps'.format(1./dt[i-1]))
        I2=imProj(I2,proj)
        I2f=imProc(I2,th)
        F2 = I2f*mask-B
        F2f,x,y,z=feature_detection(F2,th)
        C2=np.transpose(np.array([x,y]))
        # NOTE(review): zeros shaped like C0, not C2 -- looks unintended but
        # U2/A2 are only carried over, never indexed against C2; confirm.
        U2=np.zeros(C0.shape)
        A2=np.zeros(C0.shape)
        # END PREPROCESSING #################################################
        it=0
        while it<=th[0]['motion_it']:
            # PREDICTION from motion model
            if (th[0]['motion']==1)&(len(idgood)+Xmot.shape[0]>1)&(len(C1)>0):
                if len(X1)==0:
                    [Umotion,errU_filt,Xmot_temp,Umot_temp,errmot_temp]=Propagate_MotionModel_KdTree(C1,np.array([]).reshape(-1,2),np.array([]).reshape(-1,2),np.array([]),Xmot,Umot,errmot,th)
                else:
                    [Umotion,errU_filt,Xmot_temp,Umot_temp,errmot_temp]=Propagate_MotionModel_KdTree(C1,X1[idgood,:],um[idgood,:],errU[idgood],Xmot,Umot,errmot,th)
            else:
                Umotion=np.zeros(C1.shape)
                errU_filt=np.zeros(C1.shape[0])
            # Start Iteration step #################################################
            # Build new trees
            if len(C1)>0: t1m=scp.cKDTree(C1+Umotion*dt[i-1]) # Points C1 moved by motion model # !!Check on original version
            if len(C2)>0:
                t2=scp.cKDTree(C2) # Points C2
            else:
                t2=None
            if (len(C1)>0)&(len(C2)>0):
                ns21=t2.query(C1+Umotion*dt[i-1]) # Nearest Neighboor link C1m -> C2
                ns12=t1m.query(C2) # Nearest Neighboor link C2 -> C1m
                i1221=ns12[1][ns21[1]]==np.arange(C1.shape[0]) # Check reversibility of link C2-> C1m [C1m -> C2 ]
                i2112=ns21[1][ns12[1]]==np.arange(C2.shape[0]) # For next iteration
            else:
                i1221=[];i2112=[];ns12=[];
            if (len(C0)>0)&(len(C1)>0)&(len(C2)>0):
                i_all=i1001&i1221 # Keep only bidirectional associations on 3 steps 0 -> 1 -> 2
                # Update Trajectories positions
                X0=C0[ns01[1][i_all],:]
                X1=C1[i_all,:]
                X2=C2[ns21[1][i_all],:]
                # Build traj ID
                ID=ID0[ns01[1][i_all]]
                # Velocities
                U0=(X1-X0)/dt[i-2]
                U1=(X2-X1)/dt[i-1]
                um=(U0+U1)/2.
                if (dt[i-1]+dt[i-2])==0:
                    A=np.zeros(U0.shape)+np.nan
                else:
                    A=(U1-U0)/(dt[i-1]+dt[i-2])*2.
                # Error in Motion Model prediction
                # TO CHECK whether dt should appear or not in errU
                errU=np.maximum(-10.,np.log10(np.amax((np.abs((Umotion[i_all,:]-um)*dt[i-1])),1)))
                # Filtering outliers
                ISgood=errU-errU_filt[i_all]<errU_th
                # Evolution of threshold (running filtered std of the error)
                errU_th=(th[0]['filter_time']*errU_th+th[0]['filter_std']*np.std(errU))/(th[0]['filter_time']+1.)
            else:
                i_all,ID,errU,ISgood=[[] for j in range(4)]
                X0,X1,X2,um,A=[np.array([]).reshape(-1,2) for j in range(5)]
                errU_th=2 # Reinitialize threshold
            # Filter Outliers if necessary
            if th[0]['filter']==1:
                idgood=np.where(ISgood==1)[0]
                idbad=np.where(ISgood==0)[0]
            elif len(X1)>0:
                idgood=np.arange(0,len(X1[:,1]))
                idbad=[];
            else:
                idgood=[]
                idbad=[]
            if len(errU)>0:
                infos= ' | Motion Model it %02d - %d pts - log Err. Av/Max %1.1f/%1.1f (px) ' % (it,len(idgood),np.mean(errU),np.mean(errU+errU_th))
            else:
                infos= ' | Motion Model it %02d - %d pts - log Err. Av/Max ??/%1.1f (px) ' % (it,0,errU_th)
            if INFO: print(infos)
            it+=1
        # END Iteration step #################################################
        # Replace Nan good ID by new values (new trajectories get fresh ids)
        if len(ID)>0:
            idnan=np.where(np.isnan(ID[idgood]))[0]
            if len(idnan)>0: # Associate new ID
                ID[idgood[idnan]]=np.arange(0,len(idnan))+id_traj
                id_traj=id_traj+len(idnan)
        # Keep track of last motion model used
        Xmot,Umot,errmot=Xmot_temp,Umot_temp,errmot_temp
        # Save ID for next iteration
        if len(C1)>0:
            ID0=np.zeros(C1.shape[0])+np.nan
            ID0[i_all]=ID
        # If entering the last (saving) sweep, reset trajectory ids to 0
        if (i==len(N)-nFrames) & ~(flag_web):
            ID0[i_all]=np.arange(int(sum(i_all)))
            id_traj=sum(i_all)
            A=A+np.nan
        # Save Frame ID Position Speed Acceleration (only on the final sweep)
        if (OUTPUT>0) & (i>=len(N)-nFrames+2) :
            newPts=np.vstack(((np.zeros(len(idgood))+N[i]-np.sign(dt[i-1])).T,ID[idgood].T, X1[idgood,:].T, um[idgood,:].T,A[idgood,:].T, Umotion[idgood,:].T,errU[idgood].T)).T
            Pts.append(newPts) # We use list for dynamic resizing of arrays
        if AVERAGES & (i>=len(N)-nFrames+2) :
            xi=np.uint16(X1[idgood,:])
            Av_U[xi[:,1],xi[:,0]]=Av_U[xi[:,1],xi[:,0]]+um[idgood,0]
            Av_V[xi[:,1],xi[:,0]]=Av_V[xi[:,1],xi[:,0]]+um[idgood,1]
            Av_N[xi[:,1],xi[:,0]]=Av_N[xi[:,1],xi[:,0]]+1
        # Plotting
        if (PLOT>0):
            if PAR==0:
                if len(um)==0:
                    col = []; vel= [];
                else:
                    if PLOT==1: col = np.sqrt(um[idgood,1]**2+um[idgood,0]**2); vel=um[idgood,:];
                    if PLOT==2: col = np.sqrt(Umotion[i_all,1]**2+Umotion[i_all,0]**2)[idgood]; vel=Umotion[i_all,:][idgood,:];
                    if PLOT==3: col = 10**errU_filt[i_all][idgood]; vel=Umotion[i_all,:][idgood,:];
                if len(X1)>0: q=[I1f,X1[idgood,:],vel,col,N[i],np.mean(errU[idgood])]
                if len(X1)==0: q=[I1f,[],vel,col,N[i],np.nan]
                plot(q)
            elif (queue.empty()) or (SAVE_PLOT==1): # Only plot when queue is empty or when the save order is given
                if len(um)==0:
                    col = []; vel= [];
                else:
                    if PLOT==1: col = np.sqrt(um[idgood,1]**2+um[idgood,0]**2); vel=um[idgood,:];
                    if PLOT==2: col = np.sqrt(Umotion[i_all,1]**2+Umotion[i_all,0]**2)[idgood]; vel=Umotion[i_all,:][idgood,:];
                    if PLOT==3: col = 10**errU_filt[i_all][idgood]; vel=Umotion[i_all,:][idgood,:];
                # the last argument sends a stop signal to the worker
                if len(X1)>0: q=[I1f,X1[idgood,:],vel,col,N[i],np.mean(errU[idgood]),not(i==len(N)-1)]
                if len(X1)==0: q=[I1f,[],vel,col,N[i],np.nan,not(i==len(N)-1)]
                queue.put(q)
        # Update Background (bounded step toward the current frame)
        if th[0]['BG']:
            r2=B-I2f
            r=-np.float32(r2>th[0]['BGspeed'])*th[0]['BGspeed']+np.float32(r2<-th[0]['BGspeed'])*th[0]['BGspeed']-np.float32(np.abs(r2)<=th[0]['BGspeed'])*r2;
            B=np.minimum(np.maximum(B+r,0),1)
        # Print Infos
        elapsed = time.time() - t
        infos= '%04d + %d Pts (%d%% recovered) - Time %1.2fs ' % (N[i],C1.shape[0],len(idgood)*100./np.maximum(0.1,C1.shape[0]),elapsed)+"="*20
        if INFO: print(infos)
        # Prepare matrix for next iteration
        # 1 -> 0 : shift KD-trees and link caches
        t0=t1;t1=t2;
        ns01=ns12;
        i1001=i2112
        # 2 -> 1 : shift centers, velocities and frames
        C0=C1;C1=C2;U0=U1;U1=U2;I1=I2;I1f=I2f;
    ############################ END of Main LOOP
    if PAR and PLOT:
        if INFO:
            print("="*(65))
            print('Waiting for visualization thread end...')
        p.join()
        p.terminate()
        plt.close('all')
    if INFO: print('End ! A total of {:d} objects have been tracked.'.format(id_traj))
    # SAVING ASCII ####################
    if OUTPUT==1:
        # Transform Pts into a numpy array
        Pts=np.concatenate(Pts)
        if not os.path.isdir(path+'/TracTrac/'):
            os.mkdir(path+'/TracTrac/')
        if flag_im:
            output_filename=path+'/TracTrac/' + name[-3:]+'seq_track.txt' # If list of image, default name different
        else:
            output_filename=path+'/TracTrac/' + name[:-4]+'_track.txt'
        if INFO:
            print('Saving to ASCII file '+ output_filename +'...')
        head='TracTrac v'+version +' \n Parameters: '+str(th[0])+'\n Frame ID x y Vx Vy Ax Ay Vx(prediction) Vy(prediction) Error(prediction)'
        np.savetxt(output_filename,Pts,fmt=('%d','%d','%.3f','%.3f','%.5f','%.5f','%.4f','%.4f','%.4f','%.4f','%.3f'),delimiter=' ', newline='\n', header=head, comments='# ')
        if INFO:
            print('Raw tracking data saved as ASCII file!')
    # SAVING HDF5 ####################
    if OUTPUT==2:
        # Transform Pts into a numpy array
        Pts=np.concatenate(Pts)
        print(Pts.shape)
        if not os.path.isdir(path+'/TracTrac/'):
            os.mkdir(path+'/TracTrac/')
        if flag_im:
            output_filename=path+'/TracTrac/' + name[-3:]+'seq_track.hdf5' # If list of image, default name different
        else:
            output_filename=path+'/TracTrac/' + name[:-4]+'_track.hdf5'
        if INFO:
            print('Saving to binary file '+ output_filename +'...')
        f = h5py.File(output_filename,'w')
        f.attrs['version']='HDF5 file made with TracTrac Python v'+version
        f.attrs['date']=time.strftime("%d/%m/%Y")
        f.attrs['nFrames']=nFrames
        f.attrs['size']=I0f.shape
        for items in th[0].keys(): f.attrs['th:'+items]=th[0][items]
        f.create_dataset("Frame", data=np.uint16(Pts[:,0]))
        f.create_dataset("Id", data=np.uint16(Pts[:,1]))
        f.create_dataset("x", data=np.float32(Pts[:,2:4]))
        f.create_dataset("u", data=np.float32(Pts[:,4:6]))
        f.create_dataset("a", data=np.float32(Pts[:,6:8]))
        f.create_dataset("u_motion", data=np.float32(Pts[:,8:10]))
        f.create_dataset("err_motion", data=np.uint32(Pts[:,10]))
        f.close()
        if INFO:
            print('Raw tracking data saved as HDF5 file!')
    if AVERAGES:
        if not os.path.isdir(path+'/TracTrac/'):
            os.mkdir(path+'/TracTrac/')
        if flag_im:
            output_filename=path+'/TracTrac/' + name[-3:]+'seq' # If list of image, default name different
        else:
            output_filename=path+'/TracTrac/' + name[:-4]
        Av_N[Av_N==0]=np.nan
        Ui=Av_U/Av_N
        cv2.imwrite(output_filename+'_Ux_[{:1.3e},{:1.3e}].tif'.format(np.nanmin(Ui),np.nanmax(Ui)),
            np.float32((Ui-np.nanmin(Ui))/(np.nanmax(Ui)-np.nanmin(Ui))))
        Ui=Av_V/Av_N
        cv2.imwrite(output_filename+'_Uy_[{:1.3e},{:1.3e}].tif'.format(np.nanmin(Ui),np.nanmax(Ui)),
            np.float32((Ui-np.nanmin(Ui))/(np.nanmax(Ui)-np.nanmin(Ui))))
        Ui=np.sqrt(Av_U**2.+Av_V**2.)/Av_N
        cv2.imwrite(output_filename+'_Umag_[{:1.3e},{:1.3e}].tif'.format(np.nanmin(Ui),np.nanmax(Ui)),
            np.float32((Ui-np.nanmin(Ui))/(np.nanmax(Ui)-np.nanmin(Ui))))
        Ui=Av_N
        cv2.imwrite(output_filename+'_N_[{:1.3e},{:1.3e}].tif'.format(np.nanmin(Ui),np.nanmax(Ui)),
            np.float32((Ui-np.nanmin(Ui))/(np.nanmax(Ui)-np.nanmin(Ui))))
        if INFO:
            print('Averages saved as tiff files!')
    print("="*(65))
    return Pts,th
#%% Run as a script
if __name__ == "__main__":
    # Command-line entry point: parse flags, mirror them into the module-level
    # globals used by the plotting helpers, then launch the tracking run.
    # (A `global` statement at module level is a no-op; kept from the original.)
    global SAVE_PLOT,Cmin,Cmax,alpha,INFO,PAR
    parser = argparse.ArgumentParser(description='TRACTRAC v'+version+' - Joris Heyman')
    parser.add_argument('-f','--file', type=str, help='Video Filename to track',default='../Sample_videos/videotest.avi')
    parser.add_argument('-tf','--tfile', type=str, help='Time of frame file',default='')
    parser.add_argument('-mmf','--motionmodelfile', type=str, help='Motion Model file',default='')
    parser.add_argument('-a','--averages', help='Save average velocity maps', action='store_true',default=False)
    parser.add_argument('-o','--output', type=int, help='Save tracking results in a file ASCII (1) or HDF5 (2)',default=0)
    parser.add_argument('-opp','--outputpp', help='Save Post Processing results in a file', action='store_true',default=False)
    parser.add_argument('-s','--silent',help='No tracking infos', action='store_false',default=True)
    # Plotting Options
    parser.add_argument('-p','--plot', type=int,help='Live plot of tracking results', default=0)
    parser.add_argument('-sp','--saveplot', help='Save plots in image sequence', action='store_true')
    parser.add_argument('-cmin','--cmin', type=float, help='Minimum velocity (px/frame) for plotting',default=0.)
    parser.add_argument('-cmax','--cmax', type=float, help='Maximum velocity (px/frame) for plotting',default=3.)
    parser.add_argument('-ca','--calpha', type=float, help='Alpha value for arrows',default=1.0)
    parser.add_argument('-par','--parallel', type=int,help='Visualization in a Parallel Thread', default=0)
    args = parser.parse_args()
    filename=args.file
    tfile=args.tfile
    mmfilename=args.motionmodelfile
    PLOT=args.plot
    AVERAGES=args.averages
    OUTPUT=args.output
    OUTPUT_PP=args.outputpp
    INFO=args.silent
    SAVE_PLOT=args.saveplot
    Cmin=args.cmin
    Cmax=args.cmax
    alpha=args.calpha
    PAR=args.parallel
    th=[{}]
    Pts,th=tractrac(filename,th,mmfilename,tfile,PLOT,OUTPUT)
|
self_contained_components.py | #!/usr/bin/env python
# Lint as: python3
"""Functions to run individual GRR components during self-contained testing."""
import atexit
import collections
import os
import platform
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Dict, Iterable, List, Optional, Union, Text
import portpicker
from google.protobuf import text_format
from grr_response_core.lib import package
from grr_response_test.lib import api_helpers
from fleetspeak.src.client.daemonservice.proto.fleetspeak_daemonservice import config_pb2 as daemonservice_config_pb2
from fleetspeak.src.client.generic.proto.fleetspeak_client_generic import config_pb2 as client_config_pb2
from fleetspeak.src.common.proto.fleetspeak import system_pb2
from fleetspeak.src.config.proto.fleetspeak_config import config_pb2
from fleetspeak.src.server.grpcservice.proto.fleetspeak_grpcservice import grpcservice_pb2
from fleetspeak.src.server.proto.fleetspeak_server import server_pb2
from fleetspeak.src.server.proto.fleetspeak_server import services_pb2
ComponentOptions = Dict[str, Union[int, str]]
class Error(Exception):
  """Base class for all errors raised by this module."""
class ConfigInitializationError(Error):
  """Signals a failure to generate a self-contained configuration."""
def _ComponentOptionsToArgs(options: Optional[ComponentOptions]) -> List[str]:
  """Renders an options mapping as repeated ["-p", "key=value"] CLI flags."""
  if options is None:
    return []
  rendered = []
  for key, value in options.items():
    rendered.append("-p")
    rendered.append("%s=%s" % (key, value))
  return rendered
def _GetServerComponentArgs(config_path: str) -> List[str]:
  """Builds the shared command line used by every GRR server component.

  Args:
    config_path: Path to a config path generated by
      self_contained_config_writer.

  Returns:
    An iterable with command line arguments to use.
  """
  primary = package.ResourcePath(
      "grr-response-core", "install_data/etc/grr-server.yaml")
  secondary = package.ResourcePath(
      "grr-response-test", "grr_response_test/test_data/grr_test.yaml")
  # Each component gets its own free monitoring port and no web auth.
  monitoring_port = portpicker.pick_unused_port()
  return [
      "--config",
      primary,
      "--secondary_configs",
      ",".join([secondary, config_path]),
      "-p",
      "Monitoring.http_port=%d" % monitoring_port,
      "-p",
      "AdminUI.webauth_manager=NullWebAuthManager",
  ]
def _GetRunEndToEndTestsArgs(
    client_id,
    server_config_path,
    tests: Optional[Iterable[str]] = None,
    manual_tests: Optional[Iterable[str]] = None) -> List[str]:
  """Assembles the flags for the run_end_to_end_tests process.

  Args:
    client_id: String with a client id pointing to an already running client.
    server_config_path: Path to the server configuration file.
    tests: (Optional) List of tests to run.
    manual_tests: (Optional) List of manual tests to not skip.

  Returns:
    An iterable with command line arguments.
  """
  admin_ui_port = api_helpers.GetAdminUIPortFromConfig(server_config_path)
  args = [
      "--api_endpoint",
      "http://localhost:%d" % admin_ui_port,
      "--api_user",
      "admin",
      "--api_password",
      "admin",
      "--client_id",
      client_id,
      "--ignore_test_context",
      "True",
  ]
  if tests is not None:
    args.extend(["--whitelisted_tests", ",".join(tests)])
  if manual_tests is not None:
    args.extend(["--manual_tests", ",".join(manual_tests)])
  return args
def _StartBinary(binary_path: str, args: List[str]) -> subprocess.Popen:
"""Starts a new process with a given binary and args.
Started subprocess will be killed automatically on exit.
Args:
binary_path: A binary to run.
args: An iterable with program arguments (not containing the program
executable).
Returns:
Popen object corresponding to a started process.
"""
popen_args = [binary_path] + args
print("Starting binary: " + " ".join(popen_args))
process = subprocess.Popen(
popen_args, bufsize=0, stdout=None, stderr=subprocess.STDOUT)
def KillOnExit():
if process.poll() is None:
process.kill()
process.wait()
atexit.register(KillOnExit)
return process
def _StartComponent(main_package: str, args: List[str]) -> subprocess.Popen:
"""Starts a new process with a given component.
This starts a Python interpreter with a "-u" argument (to turn off output
buffering) and with a "-m" argument followed by the main package name, thus
effectively executing the main() function of a given package.
Args:
main_package: Main package path.
args: An iterable with program arguments (not containing the program
executable).
Returns:
Popen object corresponding to a started process.
"""
popen_args = [sys.executable, "-u", "-m", main_package] + args
print("Starting %s component: %s" % (main_package, " ".join(popen_args)))
process = subprocess.Popen(
popen_args, bufsize=0, stdout=None, stderr=subprocess.STDOUT)
print("Component %s pid: %d" % (main_package, process.pid))
def KillOnExit():
if process.poll() is None:
print("Killing %s." % main_package)
process.kill()
process.wait()
atexit.register(KillOnExit)
return process
# Paths of the generated GRR server and client YAML configuration files.
GRRConfigs = collections.namedtuple("GRRConfigs", [
    "server_config",
    "client_config",
])
def InitGRRConfigs(mysql_database: str,
                   mysql_username: Optional[str] = None,
                   mysql_password: Optional[str] = None,
                   logging_path: Optional[str] = None,
                   osquery_path: Optional[str] = None,
                   with_fleetspeak: bool = False) -> GRRConfigs:
  """Initializes server and client config files.

  Runs the self_contained_config_writer component to generate both configs
  into temporary files that are removed at interpreter exit.

  Args:
    mysql_database: Name of the MySQL database to configure.
    mysql_username: Optional MySQL user name.
    mysql_password: Optional MySQL password.
    logging_path: Optional logging path to configure.
    osquery_path: Optional path to an osquery binary.
    with_fleetspeak: If True, generate Fleetspeak-enabled configs.

  Returns:
    GRRConfigs with paths to the generated server and client YAML files.

  Raises:
    ConfigInitializationError: If the config writer process fails.
  """
  # Create 2 temporary files to contain server and client configuration files
  # that we're about to generate.
  #
  # TODO(user): migrate to TempFilePath as soon grr.test_lib is moved to
  # grr_response_test.
  fd, built_server_config_path = tempfile.mkstemp(".yaml")
  os.close(fd)
  print("Using temp server config path: %s" % built_server_config_path)
  fd, built_client_config_path = tempfile.mkstemp(".yaml")
  os.close(fd)
  print("Using temp client config path: %s" % built_client_config_path)

  def CleanUpConfigs():
    os.remove(built_server_config_path)
    os.remove(built_client_config_path)

  atexit.register(CleanUpConfigs)
  # Generate server and client configs.
  config_writer_flags = [
      "--dest_server_config_path",
      built_server_config_path,
      "--dest_client_config_path",
      built_client_config_path,
      "--config_mysql_database",
      mysql_database,
  ]
  if mysql_username is not None:
    config_writer_flags.extend(["--config_mysql_username", mysql_username])
  if mysql_password is not None:
    config_writer_flags.extend(["--config_mysql_password", mysql_password])
  if logging_path is not None:
    config_writer_flags.extend(["--config_logging_path", logging_path])
  if osquery_path is not None:
    config_writer_flags.extend(["--config_osquery_path", osquery_path])
  if with_fleetspeak:
    config_writer_flags.extend(["--config_with_fleetspeak"])
  p = _StartComponent(
      "grr_response_test.lib.self_contained_config_writer",
      config_writer_flags)
  if p.wait() != 0:
    raise ConfigInitializationError("ConfigWriter execution failed: {}".format(
        p.returncode))
  return GRRConfigs(built_server_config_path, built_client_config_path)
# Paths of the generated Fleetspeak configuration files: the server's
# components config, the server's services config, and the client config.
FleetspeakConfigs = collections.namedtuple("FleetspeakConfigs", [
    "server_components_config",
    "server_services_config",
    "client_config",
])
def InitFleetspeakConfigs(
    grr_configs: GRRConfigs,
    mysql_database: str,
    mysql_username: Optional[str] = None,
    mysql_password: Optional[str] = None) -> FleetspeakConfigs:
  """Initializes Fleetspeak server and client configs.

  Generates certificates and config files with the fleetspeak-config binary
  under a fresh temp directory, then adjusts the Linux client config and
  writes the client-side (daemon) and server-side (gRPC) service configs so
  Fleetspeak launches and talks to GRR.

  Args:
    grr_configs: Previously generated GRR configuration paths.
    mysql_database: Name of the MySQL database Fleetspeak should use.
    mysql_username: Optional MySQL user name.
    mysql_password: Optional MySQL password.

  Returns:
    FleetspeakConfigs pointing at the generated files.

  Raises:
    ConfigInitializationError: If the fleetspeak-config process fails.
  """
  fs_frontend_port, fs_admin_port = api_helpers.GetFleetspeakPortsFromConfig(
      grr_configs.server_config)
  mysql_username = mysql_username or ""
  mysql_password = mysql_password or ""
  temp_root = tempfile.mkdtemp(suffix="_fleetspeak")

  def TempPath(*args):
    # All generated Fleetspeak artifacts live under one temporary root.
    return os.path.join(temp_root, *args)

  cp = config_pb2.Config(configuration_name="Self-contained testing")
  cp.components_config.mysql_data_source_name = "%s:%s@tcp(127.0.0.1:3306)/%s" % (
      mysql_username, mysql_password, mysql_database)
  cp.components_config.https_config.listen_address = "localhost:%d" % portpicker.pick_unused_port(
  )
  # TODO(user): Use streaming connections by default. At the moment
  # a few tests are failing with MySQL errors when streaming is used.
  cp.components_config.https_config.disable_streaming = True
  cp.components_config.admin_config.listen_address = ("localhost:%d" %
                                                      fs_admin_port)
  cp.public_host_port.append(cp.components_config.https_config.listen_address)
  cp.server_component_configuration_file = TempPath("server.config")
  cp.trusted_cert_file = TempPath("trusted_cert.pem")
  cp.trusted_cert_key_file = TempPath("trusted_cert_key.pem")
  cp.server_cert_file = TempPath("server_cert.pem")
  cp.server_cert_key_file = TempPath("server_cert_key.pem")
  cp.linux_client_configuration_file = TempPath("linux_client.config")
  cp.windows_client_configuration_file = TempPath("windows_client.config")
  cp.darwin_client_configuration_file = TempPath("darwin_client.config")
  built_configurator_config_path = TempPath("configurator.config")
  with open(built_configurator_config_path, mode="w", encoding="utf-8") as fd:
    fd.write(text_format.MessageToString(cp))
  p = _StartBinary(
      "fleetspeak-config",
      ["--logtostderr", "--config", built_configurator_config_path])
  if p.wait() != 0:
    raise ConfigInitializationError(
        "fleetspeak-config execution failed: {}".format(p.returncode))
  # Adjust client config.
  with open(
      cp.linux_client_configuration_file, mode="r", encoding="utf-8") as fd:
    conf_content = fd.read()
  conf = text_format.Parse(conf_content, client_config_pb2.Config())
  conf.filesystem_handler.configuration_directory = temp_root
  conf.filesystem_handler.state_file = TempPath("client.state")
  with open(
      cp.linux_client_configuration_file, mode="w", encoding="utf-8") as fd:
    fd.write(text_format.MessageToString(conf))
  # Write client services configuration: the Fleetspeak client daemonizes
  # the GRR client process with this argv.
  service_conf = system_pb2.ClientServiceConfig(name="GRR", factory="Daemon")
  payload = daemonservice_config_pb2.Config()
  payload.argv.extend([
      sys.executable, "-u", "-m",
      "grr_response_client.grr_fs_client",
      "--config", grr_configs.client_config
  ])
  # TODO(user): remove this condition when Fleetspeak is used as a nanny
  # on all platforms.
  if platform.system() == "Windows":
    payload.monitor_heartbeats = True
    payload.heartbeat_unresponsive_grace_period_seconds = 45
    payload.heartbeat_unresponsive_kill_period_seconds = 15
  service_conf.config.Pack(payload)
  os.mkdir(TempPath("textservices"))
  with open(
      TempPath("textservices", "GRR.textproto"), mode="w",
      encoding="utf-8") as fd:
    fd.write(text_format.MessageToString(service_conf))
  # Server services configuration: forward GRR messages over gRPC to the
  # GRR frontend.
  service_config = services_pb2.ServiceConfig(name="GRR", factory="GRPC")
  grpc_config = grpcservice_pb2.Config(
      target="localhost:%d" % fs_frontend_port, insecure=True)
  service_config.config.Pack(grpc_config)
  server_conf = server_pb2.ServerConfig(services=[service_config])
  server_conf.broadcast_poll_time.seconds = 1
  built_server_services_config_path = TempPath("server.services.config")
  with open(
      built_server_services_config_path, mode="w", encoding="utf-8") as fd:
    fd.write(text_format.MessageToString(server_conf))
  return FleetspeakConfigs(cp.server_component_configuration_file,
                           built_server_services_config_path,
                           cp.linux_client_configuration_file)
def StartServerProcesses(
    grr_configs: GRRConfigs,
    fleetspeak_configs: Optional[FleetspeakConfigs] = None,
) -> List[subprocess.Popen]:
  """Starts GRR server processes (optionally behind Fleetspeak frontend).

  Args:
    grr_configs: GRR configuration paths to pass to each server component.
    fleetspeak_configs: When given, a Fleetspeak server is started and the
      Fleetspeak-aware frontend is used instead of the plain GRR frontend.

  Returns:
    A list of started subprocess.Popen objects, one per component.
  """

  def Args():
    return _GetServerComponentArgs(grr_configs.server_config)

  if fleetspeak_configs is None:
    # Plain GRR deployment: admin UI, HTTP frontend and worker.
    component_modules = [
        "grr_response_server.gui.admin_ui",
        "grr_response_server.bin.frontend",
        "grr_response_server.bin.worker",
    ]
    return [_StartComponent(module, Args()) for module in component_modules]

  # Fleetspeak deployment: the Fleetspeak server binary comes first, then the
  # Fleetspeak-specific frontend replaces the plain one.
  fleetspeak_server = _StartBinary("fleetspeak-server", [
      "-logtostderr",
      "-components_config",
      fleetspeak_configs.server_components_config,
      "-services_config",
      fleetspeak_configs.server_services_config,
  ])
  component_modules = [
      "grr_response_server.bin.fleetspeak_frontend",
      "grr_response_server.gui.admin_ui",
      "grr_response_server.bin.worker",
  ]
  return [fleetspeak_server] + [
      _StartComponent(module, Args()) for module in component_modules
  ]
def StartClientProcess(grr_configs: GRRConfigs,
                       fleetspeak_configs: Optional[FleetspeakConfigs] = None,
                       verbose: bool = False) -> subprocess.Popen:
  """Starts a GRR client or Fleetspeak client configured to run GRR.

  Args:
    grr_configs: GRR configuration paths (used for the plain GRR client).
    fleetspeak_configs: When given, the Fleetspeak client binary is started
      instead of the plain GRR client.
    verbose: If True, pass --verbose to the plain GRR client.

  Returns:
    The started subprocess.Popen object.
  """
  if fleetspeak_configs is not None:
    return _StartBinary("fleetspeak-client", [
        "-logtostderr",
        "-std_forward",
        "-config",
        fleetspeak_configs.client_config,
    ])

  client_args = ["--config", grr_configs.client_config]
  if verbose:
    client_args.append("--verbose")
  return _StartComponent("grr_response_client.client", client_args)
def RunEndToEndTests(client_id: str,
                     server_config_path: str,
                     tests: Optional[Iterable[str]] = None,
                     manual_tests: Optional[Iterable[str]] = None):
  """Runs end to end tests on a given client.

  Raises:
    RuntimeError: If the test-runner process exits with a non-zero status.
  """
  component_args = _GetServerComponentArgs(server_config_path)
  e2e_args = _GetRunEndToEndTestsArgs(
      client_id, server_config_path, tests=tests, manual_tests=manual_tests)
  runner = _StartComponent("grr_response_test.run_end_to_end_tests",
                           component_args + e2e_args)
  if runner.wait() != 0:
    raise RuntimeError("RunEndToEndTests execution failed.")
def RunBuildTemplate(server_config_path: str,
                     component_options: Optional[ComponentOptions] = None,
                     version_ini: Optional[str] = None) -> str:
  """Builds a GRR client template and returns the path to the built artifact.

  Note: the previous docstring ("Runs end to end tests on a given client") was
  copy-pasted from RunEndToEndTests and did not describe this function.

  Args:
    server_config_path: Path to the server configuration file.
    component_options: Extra configuration options for the client builder.
    version_ini: Optional contents of a version.ini file overriding the
      client version being built.

  Returns:
    Path to the single file produced in the build output directory.

  Raises:
    RuntimeError: If the client_build process exits with a non-zero status.
  """
  output_dir = tempfile.mkdtemp()

  def CleanUpTemplate():
    shutil.rmtree(output_dir)

  # The template dir must outlive this call (callers read the result), so it
  # is only removed at interpreter exit.
  atexit.register(CleanUpTemplate)

  options = dict(component_options or {})
  if version_ini:
    fd, version_ini_path = tempfile.mkstemp(".ini")
    try:
      os.write(fd, version_ini.encode("ascii"))
    finally:
      os.close(fd)
    options["ClientBuilder.version_ini_path"] = version_ini_path

  p = _StartComponent(
      "grr_response_client_builder.client_build",
      _GetServerComponentArgs(server_config_path) +
      _ComponentOptionsToArgs(options) + ["build", "--output", output_dir])
  if p.wait() != 0:
    raise RuntimeError("RunBuildTemplate execution failed.")

  # The build produces exactly one artifact in the (fresh) output directory.
  return os.path.join(output_dir, os.listdir(output_dir)[0])
def RunRepackTemplate(
    server_config_path: str,
    template_path: str,
    component_options: Optional[ComponentOptions] = None) -> str:
  """Runs 'grr_client_builder repack' to repack a template.

  Args:
    server_config_path: Path to the server configuration file.
    template_path: Path to the client template to repack.
    component_options: Extra configuration options for the client builder.

  Returns:
    Path to the biggest file produced by the repack step.

  Raises:
    RuntimeError: If the client_build process exits with a non-zero status.
  """
  output_dir = tempfile.mkdtemp()

  def CleanUpInstaller():
    shutil.rmtree(output_dir)

  atexit.register(CleanUpInstaller)

  p = _StartComponent(
      "grr_response_client_builder.client_build",
      _GetServerComponentArgs(server_config_path) +
      _ComponentOptionsToArgs(component_options) +
      ["repack", "--template", template_path, "--output_dir", output_dir])
  if p.wait() != 0:
    raise RuntimeError("RunRepackTemplate execution failed.")

  # Repacking may apparently generate more than one file. Just select the
  # biggest one: it's guaranteed to be the template.
  # (Previously a comprehension here shadowed the Popen variable `p` and the
  # result was picked via max(zip(sizes, paths)); selecting by key avoids
  # both the shadowing and the accidental path tie-break.)
  paths = [os.path.join(output_dir, fname) for fname in os.listdir(output_dir)]
  return max(paths, key=os.path.getsize)
def RunUploadExe(server_config_path: str,
                 exe_path: str,
                 platform_str: str,
                 component_options: Optional[ComponentOptions] = None) -> str:
  """Runs 'grr_config_updater upload_exe' to upload a binary to GRR.

  Args:
    server_config_path: Path to the server configuration file.
    exe_path: Path of the local executable to upload.
    platform_str: Target platform name used as the upload namespace.
    component_options: Extra configuration options for config_updater.

  Returns:
    The server-side path of the uploaded binary
    ("<platform>/test/<basename>").

  Raises:
    RuntimeError: If the config_updater process exits with a non-zero status.
  """
  p = _StartComponent(
      "grr_response_server.bin.config_updater",
      _GetServerComponentArgs(server_config_path) +
      _ComponentOptionsToArgs(component_options) + [
          "upload_exe", "--file", exe_path, "--platform", platform_str,
          "--upload_subdirectory", "test"
      ])
  if p.wait() != 0:
    raise RuntimeError("RunUploadExe execution failed.")

  return "%s/test/%s" % (platform_str, os.path.basename(exe_path))
# Seconds between liveness polls of monitored subprocesses (see
# _DieIfSubProcessDies below).
_PROCESS_CHECK_INTERVAL = 0.1
def _DieIfSubProcessDies(processes: Iterable[subprocess.Popen],
                         already_dead_event: threading.Event):
  """Synchronously waits for processes and dies if one dies."""
  while True:
    for p in processes:
      # poll() returns None while running; 0 is a clean exit. Anything else
      # is an unexpected death of a monitored subprocess.
      if p.poll() not in [None, 0]:
        # Prevent a double kill. When the main process exits, it kills the
        # children. We don't want a child's death to cause a SIGTERM being
        # sent to a process that's already exiting.
        if already_dead_event.is_set():
          return

        # DieIfSubProcessDies runs in a background thread, raising an exception
        # will just kill the thread while what we want is to fail the whole
        # process.
        print("Subprocess %s died unexpectedly. Killing main process..." %
              p.pid)
        # Best-effort SIGTERM to every sibling; some may already be gone.
        for kp in processes:
          try:
            os.kill(kp.pid, signal.SIGTERM)
          except OSError:
            pass
        # sys.exit only exits a thread when called from a thread.
        # Killing self with SIGTERM to ensure the process runs necessary
        # cleanups before exiting.
        os.kill(os.getpid(), signal.SIGTERM)
    time.sleep(_PROCESS_CHECK_INTERVAL)
def DieIfSubProcessDies(
    processes: Iterable[subprocess.Popen]) -> threading.Thread:
  """Kills the process if any of given processes dies.

  This function is supposed to run in a background thread and monitor provided
  processes to ensure they don't die silently.

  Args:
    processes: An iterable with multiprocessing.Process instances.

  Returns:
    Background thread started to monitor the processes.
  """
  already_dead_event = threading.Event()

  monitor = threading.Thread(
      target=_DieIfSubProcessDies, args=(processes, already_dead_event))
  monitor.daemon = True
  monitor.start()

  # On normal interpreter exit, flag the event so the monitor thread does not
  # mistake the intentional shutdown for a crash (prevents a double kill).
  atexit.register(already_dead_event.set)

  return monitor
def RunApiShellRawAccess(config: Text, exec_code: Text) -> None:
  """Runs exec_code in the API shell.

  Args:
    config: Path to the configuration file for the shell.
    exec_code: Python code to execute inside the shell.

  Raises:
    RuntimeError: If the shell process exits with a non-zero status.
  """
  p = _StartComponent(
      "grr_response_server.bin.api_shell_raw_access",
      ["--config", config, "--exec_code", exec_code],
  )
  if p.wait() != 0:
    # RuntimeError for consistency with the other Run* helpers in this module
    # (previously raised a bare Exception; RuntimeError is a subclass, so
    # existing `except Exception` callers keep working).
    raise RuntimeError("api_shell_raw_access execution failed: {}".format(
        p.returncode))
|
perf.py | #!/usr/bin/python3
import argparse
import clickhouse_driver
import itertools
import functools
import math
import os
import pprint
import random
import re
import statistics
import string
import sys
import time
import traceback
import logging
import xml.etree.ElementTree as et
from threading import Thread
from scipy import stats
# Only warnings and above go to the log; test results are printed as TSV rows.
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', level='WARNING')

# Wall-clock anchors used by reportStageEnd() to time each stage of the run.
total_start_seconds = time.perf_counter()
stage_start_seconds = total_start_seconds
def reportStageEnd(stage):
    """Print a TSV 'stage' row with per-stage and cumulative elapsed time."""
    global stage_start_seconds, total_start_seconds

    now = time.perf_counter()
    stage_elapsed = now - stage_start_seconds
    total_elapsed = now - total_start_seconds
    print(f'stage\t{stage}\t{stage_elapsed:.3f}\t{total_elapsed:.3f}')
    stage_start_seconds = now
def tsv_escape(s):
    """Escape a string for safe embedding in a single TSV cell.

    Backslashes are escaped first so later substitutions are not re-escaped;
    carriage returns are dropped entirely.
    """
    replacements = (('\\', '\\\\'), ('\t', '\\t'), ('\n', '\\n'), ('\r', ''))
    for old, new in replacements:
        s = s.replace(old, new)
    return s
parser = argparse.ArgumentParser(description='Run performance test.')
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.")
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
parser.add_argument('--keep-created-tables', action='store_true', help="Don't drop the created tables after the test.")
parser.add_argument('--use-existing-tables', action='store_true', help="Don't create or drop the tables, use the existing ones instead.")
args = parser.parse_args()

reportStageEnd('start')

# The test name (description file basename) is used as the query-id prefix.
test_name = os.path.splitext(os.path.basename(args.file[0].name))[0]

tree = et.parse(args.file[0])
root = tree.getroot()

reportStageEnd('parse')

# Process query parameters
subst_elems = root.findall('substitutions/substitution')
available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
for e in subst_elems:
    available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')]

# Takes parallel lists of templates, substitutes them with all combos of
# parameters. The set of parameters is determined based on the first list.
# Note: keep the order of queries -- sometimes we have DROP IF EXISTS
# followed by CREATE in create queries section, so the order matters.
def substitute_parameters(query_templates, other_templates = ()):
    """Substitute all parameter combinations into parallel template lists.

    The set of parameters is determined from the first list (query_templates);
    each list in other_templates must be parallel to it (one entry per query
    template) and is expanded with the same combinations.

    Returns:
        Just the expanded query list when other_templates is empty; otherwise
        a (query_results, other_results) tuple.
    """
    query_results = []
    # Bug fix: `[[]] * n` creates n references to ONE shared list, so appends
    # for one template leaked into every other template's results whenever
    # more than one other_template was passed. Build independent lists.
    other_results = [[] for _ in other_templates]
    for i, q in enumerate(query_templates):
        # Parameter names are the format-string fields of the query template.
        keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n)
        values = [available_parameters[k] for k in keys]
        combos = itertools.product(*values)
        for c in combos:
            with_keys = dict(zip(keys, c))
            query_results.append(q.format(**with_keys))
            for j, t in enumerate(other_templates):
                other_results[j].append(t[i].format(**with_keys))
    if len(other_templates):
        return query_results, other_results
    else:
        return query_results
# Build a list of test queries, substituting parameters to query templates,
# and reporting the queries marked as short.
test_queries = []
is_short = []
for e in root.findall('query'):
    new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]])
    test_queries += new_queries
    # NOTE(review): eval() on the 'short' attribute executes arbitrary
    # expressions; acceptable only because test files are trusted input.
    is_short += [eval(s) for s in new_is_short]

assert(len(test_queries) == len(is_short))

# If we're given a list of queries to run, check that it makes sense.
for i in args.queries_to_run or []:
    if i < 0 or i >= len(test_queries):
        print(f'There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present')
        exit(1)

# If we're only asked to print the queries, do that and exit.
if args.print_queries:
    for i in args.queries_to_run or range(0, len(test_queries)):
        print(test_queries[i])
    exit(0)

# Print short queries
for i, s in enumerate(is_short):
    if s:
        print(f'short\t{i}')

# If we're only asked to print the settings, do that and exit. These are settings
# for clickhouse-benchmark, so we print them as command line arguments, e.g.
# '--max_memory_usage=10000000'.
if args.print_settings:
    for s in root.findall('settings/*'):
        print(f'--{s.tag}={s.text}')
    exit(0)

# Skip long tests
if not args.long:
    for tag in root.findall('.//tag'):
        if tag.text == 'long':
            print('skipped\tTest is tagged as long.')
            sys.exit(0)

# Print report threshold for the test if it is set.
ignored_relative_change = 0.05
if 'max_ignored_relative_change' in root.attrib:
    ignored_relative_change = float(root.attrib["max_ignored_relative_change"])
    print(f'report-threshold\t{ignored_relative_change}')

reportStageEnd('before-connect')

# Open connections
# Hosts and ports are zipped pairwise; a missing element on either side falls
# back to the first host/port.
servers = [{'host': host or args.host[0], 'port': port or args.port[0]} for (host, port) in itertools.zip_longest(args.host, args.port)]
# Force settings_is_important to fail queries on unknown settings.
all_connections = [clickhouse_driver.Client(**server, settings_is_important=True) for server in servers]

for i, s in enumerate(servers):
    print(f'server\t{i}\t{s["host"]}\t{s["port"]}')

reportStageEnd('connect')

if not args.use_existing_tables:
    # Run drop queries, ignoring errors. Do this before all other activity,
    # because clickhouse_driver disconnects on error (this is not configurable),
    # and the new connection loses the changes in settings.
    drop_query_templates = [q.text for q in root.findall('drop_query')]
    drop_queries = substitute_parameters(drop_query_templates)
    for conn_index, c in enumerate(all_connections):
        for q in drop_queries:
            try:
                c.execute(q)
                print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
            except:
                pass

    reportStageEnd('drop-1')

# Apply settings.
settings = root.findall('settings/*')
for conn_index, c in enumerate(all_connections):
    for s in settings:
        # requires clickhouse-driver >= 1.1.5 to accept arbitrary new settings
        # (https://github.com/mymarilyn/clickhouse-driver/pull/142)
        c.settings[s.tag] = s.text

reportStageEnd('settings')

# Check tables that should exist. If they don't exist, just skip this test.
tables = [e.text for e in root.findall('preconditions/table_exists')]
for t in tables:
    for c in all_connections:
        try:
            res = c.execute("select 1 from {} limit 1".format(t))
        except:
            exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1]
            skipped_message = ' '.join(exception_message.split('\n')[:2])
            print(f'skipped\t{tsv_escape(skipped_message)}')
            sys.exit(0)

reportStageEnd('preconditions')
if not args.use_existing_tables:
    # Run create and fill queries. We will run them simultaneously for both
    # servers, to save time. The weird XML search + filter is because we want to
    # keep the relative order of elements, and etree doesn't support the
    # appropriate xpath query.
    create_query_templates = [q.text for q in root.findall('./*')
        if q.tag in ('create_query', 'fill_query')]
    create_queries = substitute_parameters(create_query_templates)

    # Disallow temporary tables, because the clickhouse_driver reconnects on
    # errors, and temporary tables are destroyed. We want to be able to continue
    # after some errors.
    for q in create_queries:
        if re.search('create temporary table', q, flags=re.IGNORECASE):
            print(f"Temporary tables are not allowed in performance tests: '{q}'",
                file = sys.stderr)
            sys.exit(1)

    def do_create(connection, index, queries):
        # Worker body: run every create/fill query sequentially on one server.
        for q in queries:
            connection.execute(q)
            print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}')

    # One thread per server so schema creation happens in parallel.
    threads = [
        Thread(target = do_create, args = (connection, index, create_queries))
        for index, connection in enumerate(all_connections)]

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    reportStageEnd('create')
# By default, test all queries.
queries_to_run = range(0, len(test_queries))

if args.max_queries:
    # If specified, test a limited number of queries chosen at random.
    queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries))

if args.queries_to_run:
    # Run the specified queries.  Takes precedence over --max-queries.
    queries_to_run = args.queries_to_run

# Run test queries.
# Accumulates time spent in the optional profiling runs across all queries.
profile_total_seconds = 0
for query_index in queries_to_run:
    q = test_queries[query_index]
    query_prefix = f'{test_name}.query{query_index}'

    # We have some crazy long queries (about 100kB), so trim them to a sane
    # length. This means we can't use query text as an identifier and have to
    # use the test name + the test-wide query index.
    query_display_name = q
    if len(query_display_name) > 1000:
        query_display_name = f'{query_display_name[:1000]}...({query_index})'

    print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}')

    # Prewarm: run once on both servers. Helps to bring the data into memory,
    # precompile the queries, etc.
    # A query might not run on the old server if it uses a function added in the
    # new one. We want to run them on the new server only, so that the PR author
    # can ensure that the test works properly. Remember the errors we had on
    # each server.
    query_error_on_connection = [None] * len(all_connections);
    for conn_index, c in enumerate(all_connections):
        try:
            prewarm_id = f'{query_prefix}.prewarm0'
            # Will also detect too long queries during warmup stage
            res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': 10})
            print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
        except KeyboardInterrupt:
            raise
        except:
            # FIXME the driver reconnects on error and we lose settings, so this
            # might lead to further errors or unexpected behavior.
            query_error_on_connection[conn_index] = traceback.format_exc();
            continue

    # Report all errors that ocurred during prewarm and decide what to do next.
    # If prewarm fails for the query on all servers -- skip the query and
    # continue testing the next query.
    # If prewarm fails on one of the servers, run the query on the rest of them.
    no_errors = []
    for i, e in enumerate(query_error_on_connection):
        if e:
            print(e, file = sys.stderr)
        else:
            no_errors.append(i)

    if len(no_errors) == 0:
        continue
    elif len(no_errors) < len(all_connections):
        print(f'partial\t{query_index}\t{no_errors}')

    this_query_connections = [all_connections[index] for index in no_errors]

    # Now, perform measured runs.
    # Track the time spent by the client to process this query, so that we can
    # notice the queries that take long to process on the client side, e.g. by
    # sending excessive data.
    start_seconds = time.perf_counter()
    server_seconds = 0
    profile_seconds = 0
    run = 0

    # Arrays of run times for each connection.
    all_server_times = []
    for conn_index, c in enumerate(this_query_connections):
        all_server_times.append([])

    while True:
        run_id = f'{query_prefix}.run{run}'

        for conn_index, c in enumerate(this_query_connections):
            try:
                res = c.execute(q, query_id = run_id)
            except Exception as e:
                # Add query id to the exception to make debugging easier.
                # NOTE(review): assumes driver exceptions expose a `.message`
                # attribute (standard exceptions don't) -- confirm.
                e.args = (run_id, *e.args)
                e.message = run_id + ': ' + e.message
                raise

            elapsed = c.last_query.elapsed
            all_server_times[conn_index].append(elapsed)

            server_seconds += elapsed
            print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')

            if elapsed > 10:
                # Stop processing pathologically slow queries, to avoid timing out
                # the entire test task. This shouldn't really happen, so we don't
                # need much handling for this case and can just exit.
                print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
                exit(2)

        # Be careful with the counter, after this line it's the next iteration
        # already.
        run += 1

        # Try to run any query for at least the specified number of times,
        # before considering other stop conditions.
        if run < args.runs:
            continue

        # For very short queries we have a special mode where we run them for at
        # least some time. The recommended lower bound of run time for "normal"
        # queries is about 0.1 s, and we run them about 10 times, giving the
        # time per query per server of about one second. Use this value as a
        # reference for "short" queries.
        if is_short[query_index]:
            if server_seconds >= 2 * len(this_query_connections):
                break
            # Also limit the number of runs, so that we don't go crazy processing
            # the results -- 'eqmed.sql' is really suboptimal.
            if run >= 500:
                break
        else:
            if run >= args.runs:
                break

    client_seconds = time.perf_counter() - start_seconds
    print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}')

    # Run additional profiling queries to collect profile data, but only if test times appeared to be different.
    # We have to do it after normal runs because otherwise it will affect test statistics too much
    if len(all_server_times) != 2:
        continue

    if len(all_server_times[0]) < 3:
        # Don't fail if for some reason there are not enough measurements.
        continue

    # Welch's t-test (unequal variances) between the two servers' run times.
    pvalue = stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue
    median = [statistics.median(t) for t in all_server_times]
    # Keep this consistent with the value used in report. Should eventually move
    # to (median[1] - median[0]) / min(median), which is compatible with "times"
    # difference we use in report (max(median) / min(median)).
    relative_diff = (median[1] - median[0]) / median[0]
    print(f'diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}')
    if abs(relative_diff) < ignored_relative_change or pvalue > 0.05:
        continue

    # Perform profile runs for fixed amount of time. Don't limit the number
    # of runs, because we also have short queries.
    profile_start_seconds = time.perf_counter()
    run = 0
    while time.perf_counter() - profile_start_seconds < args.profile_seconds:
        run_id = f'{query_prefix}.profile{run}'

        for conn_index, c in enumerate(this_query_connections):
            try:
                res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000})
                print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
            except Exception as e:
                # Add query id to the exception to make debugging easier.
                e.args = (run_id, *e.args)
                e.message = run_id + ': ' + e.message
                raise

        run += 1

    profile_total_seconds += time.perf_counter() - profile_start_seconds
print(f'profile-total\t{profile_total_seconds}')

reportStageEnd('run')

# Run drop queries
if not args.keep_created_tables and not args.use_existing_tables:
    drop_queries = substitute_parameters(drop_query_templates)
    for conn_index, c in enumerate(all_connections):
        for q in drop_queries:
            c.execute(q)
            print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')

    reportStageEnd('drop-2')
|
Reference_Generator.py | import urllib.request as request
import shutil
from contextlib import closing
import gzip
import re
import os
import subprocess
from pathlib import Path
from multiprocessing import Process, current_process, cpu_count, Manager
from time import sleep
import os
import argparse
from sys import argv
from Web_Connector import Web_Connector
import requests
from gzip import open as gzip_open
__author__ = "Pedro Queirós"
__status__ = "Production"
__credits__ = ['Pedro Queirós']
SPLITTER='/'
class Reference_Generator():
def __init__(self,work_dir,remove_files=False,min_seqs=10,number_cores=None):
    """Set up working directories, worker count and the shared work queue.

    Args:
        work_dir: root working directory (normalized to '/.../' form).
        remove_files: wipe an existing work_dir before starting.
        min_seqs: minimum sequences per cluster required to build an MSA.
        number_cores: worker count override; otherwise inferred from
            SLURM allocation or cpu_count().
    """
    # Manager-backed list shared between worker processes.
    self.manager = Manager()
    self.queue = self.manager.list()
    self.remove_files=remove_files
    self.min_seqs=min_seqs
    if number_cores:
        self.worker_count=int(number_cores)
    else:
        self.worker_count=self.check_environment_cores()
    # Normalize work_dir to start and end with the path separator; all the
    # derived paths below rely on the trailing separator.
    if not work_dir.startswith(SPLITTER): work_dir=f'{SPLITTER}{work_dir}'
    if not work_dir.endswith(SPLITTER): work_dir=f'{work_dir}{SPLITTER}'
    self.work_dir=work_dir
    self.fasta_dir = f'{self.work_dir}fastas{SPLITTER}'
    self.aln_dir = f'{self.work_dir}aln{SPLITTER}'
    self.hmm_dir = f'{self.work_dir}hmm{SPLITTER}'
    self.cluster_dir = f'{self.work_dir}cluster{SPLITTER}'
    self.tmp_dir = f'{self.work_dir}tmp{SPLITTER}'
    # Optionally start from a clean slate, then ensure the full tree exists.
    if os.path.exists(self.work_dir) and self.remove_files:
        shutil.rmtree(self.work_dir)
    for directory in [self.work_dir, self.fasta_dir, self.aln_dir, self.hmm_dir,self.cluster_dir,self.tmp_dir]:
        Path(directory).mkdir(parents=True, exist_ok=True)
def run_command(self, command, shell=False):
    """Run an external command and return the CompletedProcess object."""
    return subprocess.run(command, shell=shell)
def run_prodigal(self):
    """Predict proteins for every .fna in fasta_dir using prodigal."""
    fasta_folder=f'{self.fasta_dir}{SPLITTER}'
    fna_list=[i for i in os.listdir(fasta_folder) if i.endswith('.fna')]
    for fna in fna_list:
        # The '.faa_pro' suffix marks prodigal output; fasta-header parsing
        # elsewhere treats such files specially (keeps first header token).
        faa=fna.replace('.fna','.faa_pro')
        prodigal_command=f'prodigal -i {fasta_folder}{fna} -a {fasta_folder}{faa}'
        self.run_command(prodigal_command,shell=True)
def read_mmseqs_cluster_tsv(self, cluster_tsv_path):
    """Parse an mmseqs cluster TSV into {cluster_index: set_of_seq_ids}.

    Each TSV line is '<representative>\t<member>'; the representative is
    included in its own cluster. Cluster indices follow first-seen order of
    representatives.
    """
    clusters = {}
    with open(cluster_tsv_path) as handle:
        for raw_line in handle:
            representative, member = raw_line.strip('\n').split('\t')
            clusters.setdefault(representative, set()).add(member)

    res = {}
    for index, representative in enumerate(clusters):
        res[index] = {representative} | clusters[representative]
    return res
def generate_clusters_fasta(self,cluster,clusters,cluster_seqs):
for c in clusters:
cluster_fasta_path = f'{self.fasta_dir}{cluster}_{c}.faa'
with open(cluster_fasta_path,'a+') as file:
for seq_id in clusters[c]:
seq=cluster_seqs[seq_id]
seq_line=f'>{seq_id}\n{seq}\n'
file.write(seq_line)
def split_fastas_by_clusters(self):
    """Replace the raw fasta folder with per-cluster fasta files."""
    # Move the raw fastas aside and recreate an empty fasta dir for output.
    fasta_folder=f'{self.fasta_dir}'.rstrip(SPLITTER)
    shutil.move(fasta_folder,f'{fasta_folder}_raw_fastas')
    Path(fasta_folder).mkdir(parents=True, exist_ok=True)
    for cluster in os.listdir(self.cluster_dir):
        # Paths follow the mmseqs easy-cluster naming convention.
        cluster_tsv=f'{self.cluster_dir}{cluster}{SPLITTER}{cluster}.clu_cluster.tsv'
        cluster_fasta=f'{self.cluster_dir}{cluster}{SPLITTER}{cluster}.clu_all_seqs.fasta'
        clusters=self.read_mmseqs_cluster_tsv(cluster_tsv)
        cluster_seqs = self.read_protein_fasta(cluster_fasta)
        self.generate_clusters_fasta(cluster,clusters,cluster_seqs)
def run_mmseqs(self):
    """Cluster every .faa in fasta_dir with 'mmseqs easy-cluster'."""
    faa_list=[i for i in os.listdir(self.fasta_dir) if i.endswith('.faa')]
    for faa in faa_list:
        current_cluster_dir = f'{self.cluster_dir}{faa}{SPLITTER}'.replace('.faa','')
        Path(current_cluster_dir).mkdir(parents=True, exist_ok=True)
        db=faa.replace('.faa','.db')
        cluster=faa.replace('.faa','.clu')
        tsv=faa.replace('.faa','.tsv')
        # NOTE: the commented lines below are the manual multi-step mmseqs
        # pipeline (createdb/cluster/createtsv) that easy-cluster replaces.
        #mmseqs_createdb=f'mmseqs createdb {self.fasta_dir}{faa} {current_cluster_dir}{db}'
        #self.run_command(mmseqs_createdb,shell=True)
        #mmseqs_cluster=f'mmseqs cluster {current_cluster_dir}{db} {current_cluster_dir}{cluster} {self.tmp_dir}'
        #self.run_command(mmseqs_cluster,shell=True)
        #mmseqs_tsv=f'mmseqs createtsv {current_cluster_dir}{db} {current_cluster_dir}{db} {current_cluster_dir}{cluster} {current_cluster_dir}{tsv} {self.tmp_dir}'
        #self.run_command(mmseqs_tsv,shell=True)
        mmseqs_tsv=f'mmseqs easy-cluster {self.fasta_dir}{faa} {current_cluster_dir}{cluster} {self.tmp_dir}'
        self.run_command(mmseqs_tsv,shell=True)
def download_diamond(self):
    """Download and unpack the DIAMOND 2.0.9 Linux binary into work_dir."""
    # NOTE(review): plain http URL; consider https to avoid tampering.
    diamond_url = 'http://github.com/bbuchfink/diamond/releases/download/v2.0.9/diamond-linux64.tar.gz'
    archive_path = f'{self.work_dir}diamond-linux64.tar.gz'
    # Stream the download straight to disk to avoid holding it in memory.
    with requests.get(diamond_url, stream=True) as r:
        with open(archive_path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    shutil.unpack_archive(archive_path, extract_dir=self.work_dir)
    os.remove(archive_path)
def processes_handler(self, target_worker_function, add_sentinels=True):
    '''
    Spawn one process per worker over the shared queue, then append one None
    sentinel per worker so each worker knows when the queue is drained.
    If new work needs to be added mid-run (e.g. taxa annotation) it is added
    to the START of the list, before the sentinels.
    '''
    # os.getpid to add the master_pid
    # Never spawn more workers than there are queued work items.
    if len(self.queue)<self.worker_count: worker_count=len(self.queue)
    else: worker_count=self.worker_count
    processes = [Process(target=target_worker_function, args=(self.queue, os.getpid(),)) for _ in range(worker_count)]
    # adding sentinel record since queue can be signaled as empty when its really not
    if add_sentinels:
        for _ in range(worker_count): self.queue.append(None)
    for process in processes:
        process.start()
    # we could manage the processes memory here with a while cycle
    for process in processes:
        process.join()
        # exitcode 0 for sucessful exists
        if process.exitcode != 0:
            sleep(5)
            print('Ran into an issue, check the log for details. Exitting!')
            # Hard-exit the whole program if any worker failed.
            os._exit(1)
#######
def get_slurm_value(self,wanted_val, regex_pattern):
    """Query sacct for a field of the current SLURM job.

    Returns the first regex match from sacct output, or None when not
    running under SLURM (no SLURM_JOBID in the environment).
    """
    res = None
    slurm_job_id = os.environ.get('SLURM_JOBID')
    if slurm_job_id:
        process = subprocess.run('sacct -j ' + str(slurm_job_id) + ' -o ' + wanted_val, shell=True,stdout=subprocess.PIPE)
        wanted = re.search(regex_pattern, str(process.stdout))
        if wanted: res = wanted.group()
    return res
def check_environment_cores(self):
res = self.get_slurm_value('AllocCPUS', re.compile('\d+'))
if res:
if int(res):
print('Cores allocated by slurm:', res)
return int(res)
else:
res = cpu_count()
print('Cores allocated:', res)
return int(res)
else:
res = cpu_count()
print('Cores allocated:', res)
return int(res)
#low memory footprint_version
def read_protein_fasta_generator(self, protein_fasta_path):
    """Yield (seq_id, sequence) pairs from a protein fasta (low memory).

    Header normalization: pipe-delimited headers keep the second field;
    prodigal output (paths ending in '_pro') keeps only the first
    whitespace-separated token. Sequences are upper-cased; empty sequences
    are skipped. Yields nothing when the file does not exist.
    """
    query = None
    seq = []
    # Bug fix: the original `return res` referenced an undefined name and
    # raised NameError whenever the file was missing; a bare return simply
    # ends the generator with no items.
    if not os.path.exists(protein_fasta_path):
        return
    with open(protein_fasta_path, 'r') as file:
        for line in file:
            if line.startswith('>'):
                if query:
                    if '|' in query:
                        query = query.split('|')[1]
                    if protein_fasta_path.endswith('_pro'):
                        query = query.split()[0]
                    joined = ''.join(seq).upper()
                    if joined:
                        yield query, joined
                seq = []
                query = line.replace('>', '').strip()
            else:
                seq.append(line.strip())
    # Flush the final record after EOF.
    if query:
        if '|' in query:
            query = query.split('|')[1]
        if protein_fasta_path.endswith('_pro'):
            query = query.split()[0]
        joined = ''.join(seq).upper()
        if joined:
            yield query, joined
#low memory footprint_version
def read_protein_fasta(self, protein_fasta_path):
    """Read a protein fasta into {seq_id: sequence}.

    Header normalization: pipe-delimited headers keep the second field;
    prodigal output (paths ending in '_pro') keeps only the first
    whitespace-separated token. Sequences are upper-cased. Returns an
    empty dict when the file does not exist.
    """
    res = {}
    if not os.path.exists(protein_fasta_path):
        return res

    def normalized(header):
        # Same normalization applied to every record header.
        if '|' in header:
            header = header.split('|')[1]
        if protein_fasta_path.endswith('_pro'):
            header = header.split()[0]
        return header

    query = None
    chunks = []
    with open(protein_fasta_path, 'r') as handle:
        for raw_line in handle:
            if raw_line.startswith('>'):
                if query:
                    res[normalized(query)] = ''.join(chunks).upper()
                chunks = []
                query = raw_line.replace('>', '').strip()
            else:
                chunks.append(raw_line.strip())
    # Flush the final record after EOF.
    if query:
        res[normalized(query)] = ''.join(chunks).upper()
    return res
def get_seqs_count(self, target_sample):
    """Count fasta records (header lines starting with '>') in a file."""
    with open(target_sample) as handle:
        return sum(1 for line in handle if line.startswith('>'))
def download_file_ftp(self,url, file_path):
    """Stream a remote URL (e.g. FTP) to a local file."""
    # closing() because urlopen results for FTP are not context managers
    # on all Python versions.
    with closing(request.urlopen(url)) as r:
        with open(file_path, 'wb') as f:
            shutil.copyfileobj(r, f)
def unpack_gz(self, gz_file, output_file):
    """Decompress a .gz archive into output_file."""
    with gzip.open(gz_file, 'rb') as src, open(output_file, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def msa(self, fasta_file):
    """Align one fasta file into aln_dir (muscle for <=500 seqs, else clustalo).

    Files with fewer than self.min_seqs sequences are skipped entirely.
    """
    outfile = fasta_file.replace('.faa', '.aln')
    outfile_path = f'{self.aln_dir}{os.path.basename(outfile)}'
    print(fasta_file,outfile_path)
    seqs_count = self.get_seqs_count(fasta_file)
    if seqs_count >= self.min_seqs:
        # NOTE(review): this single-sequence branch is unreachable unless
        # min_seqs <= 1 (default is 10) -- confirm intent.
        if seqs_count == 1:
            shutil.copyfile(fasta_file, outfile_path)
        else:
            # muscle for small-mid aligns
            command=None
            if seqs_count <= 500:
                command = f'muscle -in {fasta_file} -clwout {outfile_path} -clwstrict'
            # clustal omega for larger
            else:
                command = f'clustalo -i {fasta_file} -o {outfile_path} --seqtype Protein --outfmt clu'
            if command:
                print('Running command:',command)
                subprocess.run(command, shell=True)
def msa_worker_function(self, queue, master_pid):
    """Consume fasta paths from the shared queue until a None sentinel."""
    record = queue.pop(0)
    while record is not None:
        self.msa(record)
        record = queue.pop(0)
def launch_fastas_msa(self):
    """Queue every un-aligned .faa in fasta_dir and run the MSA workers."""
    completed_msa = os.listdir(self.aln_dir)
    for file in os.listdir(self.fasta_dir):
        if file.endswith('.faa'):
            file_aln = file.replace('.faa', '.aln')
            # Skip fastas whose alignment already exists (resume support).
            if file_aln not in completed_msa:
                self.queue.append(f'{self.fasta_dir}{file}')
    self.processes_handler(self.msa_worker_function)
def hmm_builder(self,aln_file):
    """Build an HMM profile from one alignment with hmmbuild."""
    software = 'hmmbuild'
    outfile = aln_file.replace('.aln', '.hmm')
    outfile_path = f'{self.hmm_dir}{os.path.basename(outfile)}'
    command = f'{software} {outfile_path} {aln_file}'
    print('Running command:', command)
    subprocess.run(command, shell=True)
def hmm_builder_worker_function(self, queue, master_pid):
    """Consume alignment paths from the shared queue until a None sentinel."""
    record = queue.pop(0)
    while record is not None:
        self.hmm_builder(record)
        record = queue.pop(0)
def launch_aln_hmmer(self):
    """Queue every .aln lacking an HMM and run the hmmbuild workers."""
    completed_hmm = os.listdir(self.hmm_dir)
    for file in os.listdir(self.aln_dir):
        if file.endswith('.aln'):
            # Skip alignments whose HMM already exists (resume support).
            file_aln = file.replace('.aln', '.hmm')
            if file_aln not in completed_hmm:
                self.queue.append(f'{self.aln_dir}{file}')
    self.processes_handler(self.hmm_builder_worker_function)
def concat_files(self, output_file, list_file_paths):
    """Concatenate binary files into output_file, fsyncing after each one."""
    print('Concatenating files into ', output_file)
    with open(output_file, 'wb') as sink:
        for source_path in list_file_paths:
            with open(source_path, 'rb') as source:
                shutil.copyfileobj(source, sink)
            # forcing disk write
            sink.flush()
            os.fsync(sink.fileno())
def merge_profiles(self, output_file):
    """Concatenate all per-cluster HMMs into one file and hmmpress it.

    No-op if output_file already exists (resume support).
    """
    if not os.path.exists(output_file):
        print('Merging profiles in ', self.hmm_dir)
        profiles = [self.hmm_dir + SPLITTER + i for i in os.listdir(self.hmm_dir) if i.lower().endswith('.hmm')]
        self.concat_files(output_file, profiles)
        print('Pressing profile', output_file)
        self.hmm_presser(output_file)
def hmm_presser(self,hmm_file):
    """Index an HMM database with hmmpress for fast hmmscan lookups."""
    software = 'hmmpress'
    command = f'{software} {hmm_file}'
    print('Running command:', command)
    subprocess.run(command, shell=True)
def parse_bigg(self, rhea2bigg_path, wanted_dbs=None):
    """Parse the BIGG reactions dump into a cross-reference mapping.

    :param rhea2bigg_path: path to bigg_models_reactions.txt, a TSV with a
        header and columns: bigg_id, name, reaction_string, model_list,
        database_links, old_bigg_ids.
    :param wanted_dbs: optional collection drawn from {'rhea', 'reactome',
        'ec'}.  When given, the result maps external db id -> set of bigg
        ids for just those databases.  When omitted/empty, the result maps
        bigg id -> {db_name: set of external ids} for every known database.
    :return: dict as described above.

    Fixes versus the original: the mutable default argument ([]) is replaced
    with None (same falsy semantics, no shared-state pitfall); blank lines no
    longer crash the tuple unpack; the redundant 'and wanted_dbs' guards and
    the dead, never-returned 'not_added' set are gone.
    """
    print('Parsing BIGG metadata')
    wanted_dbs = wanted_dbs or []
    # translation from BIGG's link labels to internal db names (full mode)
    full_mode_names = {
        'RHEA': 'rhea',
        'Reactome Reaction': 'reactome',
        'EC Number': 'enzyme_ec',
        'MetaNetX (MNX) Equation': 'metanetx',
        'SEED Reaction': 'seed',
        'BioCyc': 'biocyc_reaction',
        'KEGG Reaction': 'kegg_reaction',
    }
    res = {}
    with open(rhea2bigg_path) as file:
        file.readline()  # skip header
        for line in file:
            line = line.strip('\n')
            if not line:
                continue
            bigg_id, name, reaction_string, model_list, database_links, old_bigg_ids = line.split('\t')
            if not database_links:
                continue
            for db_link in (i.strip() for i in database_links.split(';')):
                db, db_id = db_link.split(': ')
                if wanted_dbs:
                    # restricted mode: external id -> set of bigg ids
                    if db == 'RHEA' and 'rhea' in wanted_dbs:
                        db_id = db_id.split('/')[-1]
                        res.setdefault(db_id, set()).add(bigg_id)
                    elif db == 'Reactome Reaction' and 'reactome' in wanted_dbs:
                        db_id = db_id.split('/')[-1]
                        res.setdefault(db_id, set()).add(bigg_id)
                    elif db == 'EC Number' and 'ec' in wanted_dbs:
                        db_id = db_id.split('/')[-1]
                        # partial EC numbers (e.g. 1.1.1.-) are skipped
                        if not db_id.endswith('-'):
                            res.setdefault(db_id, set()).add(bigg_id)
                else:
                    # full mode: bigg id -> {db: ids}; unknown labels are
                    # printed but still stored under their raw label,
                    # mirroring the original behaviour
                    if db in full_mode_names:
                        db = full_mode_names[db]
                    else:
                        print(db)
                    db_id = db_id.split('/')[-1]
                    res.setdefault(bigg_id, {}).setdefault(db, set()).add(db_id)
    return res
#MSAs need to be checked for quality
class Reference_Generator_Uniprot(Reference_Generator):
    """Builds annotation references (DIAMOND databases or HMMs) from UniProt dumps."""
    def __init__(self,work_dir,remove_files,min_seqs,number_cores,db):
        # db selects the workflow: 'swissprot'/'trembl' -> DIAMOND database,
        # 'ec' -> per-EC-number HMM inputs.
        Reference_Generator.__init__(self,work_dir=work_dir,remove_files=remove_files,min_seqs=min_seqs,number_cores=number_cores)
        self.db=db
        # NOTE(review): the entire workflow (network downloads + subprocess
        # calls) runs from the constructor — side effects on instantiation.
        self.workflow_function()
def parse_seq(self, file):
    """Read a UniProt flat-file sequence block (the lines after 'SQ') from
    <file> and return the amino-acid sequence as one unbroken string.

    Consumes lines up to and including the '//' record terminator.

    Bugs fixed: the original appended the first sequence line twice,
    duplicating the leading chunk of every sequence, and looped forever
    when EOF was reached before a '//' terminator.
    """
    chunks = []
    line = file.readline()
    while line and not line.startswith('//'):
        # sequence data starts at column 5; grouping spaces are dropped below
        chunks.append(line[5:].strip())
        line = file.readline()
    return ''.join(chunks).replace(' ', '').strip()
def parse_function(self, file):
    """Parse the free-text FUNCTION entries out of a UniProt 'CC' comment
    block and return them as a list of cleaned description strings.

    Consumes every consecutive 'CC' line from <file> (plus the first
    non-CC line, which acts as the terminator and cannot be pushed back).
    Each '-!- FUNCTION:' entry is joined across its continuation lines,
    then stripped of ECO evidence codes, isoform tags and PubMed refs.

    Bugs fixed versus the original: the first CC line was processed twice
    (duplicating continuation text), and a FUNCTION entry that was the
    last item of the CC block — or immediately followed by another
    FUNCTION entry — was silently dropped.
    """
    res = []
    temp = []

    def _flush():
        # finalize one accumulated FUNCTION entry into res
        if not temp:
            return
        text = ' '.join(temp)
        text = text.replace('-!- FUNCTION:', '')
        text = text.split('{ECO:')[0].strip()
        isoform_tag = re.search(r'\[Isoform \d+\]:', text)
        if isoform_tag:
            text = text.replace(isoform_tag.group(), '')
        for pubmed_ref in re.findall(r'PubMed:\d+,?\s?', text):
            text = text.replace(pubmed_ref, '')
        text = text.replace('()', '').replace(' .', '.').strip()
        res.append(text)
        temp.clear()

    recording = False
    line = file.readline()
    while line[0:2] == 'CC':
        value = line[5:].strip()
        if value.startswith('-!- FUNCTION:'):
            _flush()
            temp.append(value)
            recording = True
        elif recording and not value.startswith('-!-'):
            temp.append(value)
        elif recording and value.startswith('-!-'):
            _flush()
            recording = False
        line = file.readline()
    _flush()
    return res
def yield_entries(self,uniprot_data):
    """Stream a UniProt flat file, yielding one tuple per record:
    (seq_id, taxon_id, metadata_dict, sequence), where metadata_dict maps
    internal db names to sets of external ids.

    NOTE(review): the arCOG/COG extraction from eggnog ids only runs when a
    new 'ID' line is hit, so the final record yielded after the loop never
    gets it — confirm whether that is intended.
    """
    seq_id, db_type, taxon_id, sequence = None, None, None,None
    res={}
    # database cross-references (from DR lines) that are kept; anything else
    # is discarded
    wanted_db_types=[
        'eggnog',
        'go',
        'embl',
        'kegg_gene',
        'ncbi_gene',
        'pfam',
        'interpro',
        'hamap',
        'refseq',
        'panther',
        'ensemblbacteria',
        'antibodypedia',
        'orthodb',
        'reactome',
        'genecards',
        'malacards',
        'prints',
        'tigrfams',
        'prosite',
        'string',
        'biogrid',
        'smart',
        'gene3d',
        'peptideatlas',
        'proteomicsdb',
        'phosphositeplus',
        'biocyc',
        'brenda',
        'pdb',
        'ensembl',
        'ensemblmetazoa',
        'ensemblplants',
        'expressionatlas',
        'genevisible',
        'ensemblfungi',
        'plantreactome',
        'swisslipids',
        'pathwaycommons',
        'chembl',
        'patric',
        'genewiki',
        'moondb',
        ]
    with open(uniprot_data) as file:
        for line in file:
            db_type = None
            line=line.strip('\n')
            # UniProt flat-file layout: 2-char line type, data from column 5
            line_type, line_value = line[0:2], line[5:].strip()
            if line_type=='ID':
                db_type=None
                # a new record starts: flush the previous one (if any)
                if seq_id:
                    # split eggnog ids into arCOG/COG buckets as well
                    if 'eggnog' in res:
                        for eggnog_id in res['eggnog']:
                            if re.search('arCOG\d+',eggnog_id):
                                if 'arcog' not in res: res['arcog']=set()
                                res['arcog'].add(eggnog_id)
                            elif re.search('COG\d+',eggnog_id):
                                if 'cog' not in res: res['cog']=set()
                                res['cog'].add(eggnog_id)
                    yield seq_id,taxon_id,res,sequence
                    res={}
                    seq_id,db_type,taxon_id,sequence = None,None,None,None
                seq_id=line_value.split()[0]
            elif line_type == 'AC':
                # accession numbers, stored under 'uniprot'
                line_value=line_value.split(';')
                db_type='uniprot'
            elif line_type == 'GN' and line_value.startswith('Name='):
                db_type = 'uniprot_gene'
                line_value = line_value.replace('Name=', '').split(';')[0]
                line_value = line_value.split('{ECO:')[0]
            elif line_type == 'OX' and line_value.startswith('NCBI_TaxID='):
                # taxon id is tracked separately, not in the metadata dict
                db_type = None
                line_value=line_value.replace('NCBI_TaxID=','').strip()
                line_value = line_value.split('{ECO:')[0]
                line_value=line_value.strip(';').strip()
                taxon_id=line_value
            elif line_type == 'DE':
                db_type=None
                if line_value.startswith('RecName') or line_value.startswith('AltName'):
                    db_type = 'description'
                    line_value=line_value.replace('RecName:','').strip()
                    line_value=line_value.replace('AltName:','').strip()
                    line_value=line_value.replace('Full=','').strip()
                    line_value=line_value.split('{ECO:')[0]
                    line_value=line_value.strip(';')
                elif line_value.startswith('EC='):
                    db_type = 'enzyme_ec'
                    line_value=line_value.replace('EC=','').strip().split()[0]
                    line_value=line_value.strip(';')
            elif line_type=='CC':
                # hands the file handle over; parse_function consumes the
                # whole CC block
                db_type = 'description'
                line_value=self.parse_function(file)
            elif line_type=='SQ':
                # likewise, parse_seq consumes up to the '//' terminator
                db_type=None
                sequence=self.parse_seq(file)
            elif line_type=='DR':
                #database reference
                db_type,line_value= line_value.split(';')[0:2]
                db_type,line_value=db_type.strip(),line_value.strip()
                db_type=db_type.lower()
                if db_type=='go':
                    line_value=line_value.replace('GO:','')
                elif db_type=='kegg': db_type='kegg_gene'
                elif db_type=='geneid': db_type='ncbi_gene'
                # NOTE(review): 'tigrfams' is renamed to 'tigrfam', which is
                # NOT in wanted_db_types, so TIGRFAM refs are always dropped
                # by the filter below — likely unintended.
                elif db_type=='tigrfams': db_type='tigrfam'
                if db_type not in wanted_db_types: db_type=None
            # accumulate whatever the branch above selected for this line
            if db_type:
                if not isinstance(line_value,list): line_value=[line_value]
                if db_type not in res: res[db_type]=set()
                line_value=[i.strip() for i in line_value]
                line_value=[i for i in line_value if i]
                res[db_type].update(line_value)
        # flush the final record (see NOTE above about arcog/cog)
        yield seq_id, taxon_id, res, sequence
def output_uniprot_data_hmm(self, data_generator, db_type):
    """Write one fasta per <db_type> id from the parsed UniProt entries,
    then cluster them with mmseqs and split the fastas per cluster."""
    for seq_id, taxon_id, seq_metadata, sequence in data_generator:
        for db_id in seq_metadata.get(db_type, ()):
            with open(f'{self.fasta_dir}{db_id}.faa', 'a+') as handle:
                handle.write(f'>{seq_id}\n{sequence}\n')
    self.run_mmseqs()
    self.split_fastas_by_clusters()
def create_hmms(self, db_type):
    """Download the SwissProt flat file (if missing), uncompress it and emit
    per-<db_type> fasta files ready for MSA/HMM construction."""
    data_url = 'https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz'
    compressed_data = f'{self.work_dir}uniprot_sprot.dat.gz'
    uncompressed_data = f'{self.work_dir}uniprot_sprot.dat'
    if not os.path.exists(compressed_data) or not os.path.exists(uncompressed_data):
        print(f'Downloading data:\n{data_url}')
        self.download_file_ftp(data_url, compressed_data)
    if not os.path.exists(uncompressed_data):
        print(f'Uncompressing data:\n{compressed_data}')
        self.unpack_gz(compressed_data, uncompressed_data)
    entries = self.yield_entries(uncompressed_data)
    self.output_uniprot_data_hmm(entries, db_type)
    # Downstream MSA / HMM steps are currently disabled:
    #self.launch_fastas_msa()
    #self.launch_aln_hmmer()
    #hmm_file=f'{self.work_dir}uniprot_ec.hmm'
    #self.merge_profiles(output_file=hmm_file)
    #print(f'Finished generating {hmm_file}')
def output_uniprot_data_taxa(self, data_generator):
    """Write parsed UniProt entries into per-taxon folders: one merged fasta
    plus a metadata.tsv per NCBI taxon id.  Entries without a taxon id fall
    into the shared 'uniprotG' folder.

    Bugs fixed: the original incremented an uninitialized counter
    ('c += 1' raised NameError on the first entry), and the per-taxon
    metadata file was misnamed 'metadata.faa' instead of 'metadata.tsv'.
    """
    # return value unused; the call is kept for possible setup side effects —
    # TODO(review): confirm get_ncbi_domains() is needed here at all
    self.get_ncbi_domains()
    for seq_id, taxon_id, seq_metadata, sequence in data_generator:
        folder_name = taxon_id if taxon_id else 'uniprotG'
        current_folder = f'{self.fasta_dir}{folder_name}{SPLITTER}'
        Path(current_folder).mkdir(parents=True, exist_ok=True)
        current_fasta = f'{current_folder}{folder_name}_merged.faa'
        current_metadata = f'{current_folder}metadata.tsv'
        with open(current_fasta, 'a+') as file:
            file.write(f'>{seq_id}\n{sequence}\n')
        with open(current_metadata, 'a+') as file:
            fields = [f'{seq_id}', '|']
            for db in seq_metadata:
                for db_id in seq_metadata[db]:
                    fields.append(f'{db}:{db_id}')
            file.write('\t'.join(fields) + '\n')
def output_uniprot_data_dmnd(self, data_generator, fasta_file, metadata_file):
    """Dump every parsed UniProt entry into a single fasta plus a single
    metadata TSV, replacing any leftovers from a previous run."""
    for stale in (fasta_file, metadata_file):
        if os.path.exists(stale):
            os.remove(stale)
    for seq_id, taxon_id, seq_metadata, sequence in data_generator:
        with open(fasta_file, 'a+') as handle:
            handle.write(f'>{seq_id}\n{sequence}\n')
        fields = [f'{seq_id}', '|']
        for db, ids in seq_metadata.items():
            fields.extend(f'{db}:{db_id}' for db_id in ids)
        with open(metadata_file, 'a+') as handle:
            handle.write('\t'.join(fields) + '\n')
def create_diamond_dbs(self):
    """Download the requested UniProt release (SwissProt or TrEMBL), export
    it to fasta + metadata, and compile a DIAMOND database from the fasta.

    Raises ValueError for an unsupported self.db instead of failing later
    with a NameError on the undefined download URL (original behaviour).
    """
    if self.db == 'swissprot':
        data_url = 'https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz'
        compressed_data = f'{self.work_dir}uniprot_sprot.dat.gz'
        uncompressed_data = f'{self.work_dir}uniprot_sprot.dat'
    elif self.db == 'trembl':
        data_url = 'https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_trembl.dat.gz'
        compressed_data = f'{self.work_dir}uniprot_trembl.dat.gz'
        uncompressed_data = f'{self.work_dir}uniprot_trembl.dat'
    else:
        raise ValueError(f'Invalid db for diamond database: {self.db}')
    if not os.path.exists(compressed_data) or not os.path.exists(uncompressed_data):
        print(f'Downloading data:\n{data_url}')
        self.download_file_ftp(data_url, compressed_data)
    if not os.path.exists(uncompressed_data):
        print(f'Uncompressing data:\n{compressed_data}')
        self.unpack_gz(compressed_data, uncompressed_data)
    data_generator = self.yield_entries(uncompressed_data)
    fasta_file = f'{self.fasta_dir}uniprot_merged.faa'
    metadata_file = f'{self.fasta_dir}metadata.tsv'
    self.output_uniprot_data_dmnd(data_generator, fasta_file, metadata_file)
    # build the DIAMOND database, then drop the temporary diamond binary
    self.download_diamond()
    diamond_path = f'{self.work_dir}diamond'
    dmnd_file = fasta_file.replace('.faa', '')
    dmnd_command = f'{diamond_path} makedb --in {fasta_file} -d {dmnd_file}'
    subprocess.run(dmnd_command.split())
    os.remove(diamond_path)
    print(f'Finished generating {dmnd_file}')
def workflow_function(self):
    """Dispatch to the workflow matching self.db."""
    if self.db in ('swissprot', 'trembl'):
        self.create_diamond_dbs()
    elif self.db == 'ec':
        self.create_hmms(db_type='enzyme_ec')
#MSAs need to be checked for quality
class Reference_Generator_Rhea(Reference_Generator):
    """Builds Rhea-reaction HMM references from SwissProt sequences."""
    def __init__(self,work_dir,remove_files,min_seqs,number_cores):
        Reference_Generator.__init__(self,work_dir=work_dir,remove_files=remove_files,min_seqs=min_seqs,number_cores=number_cores)
        # NOTE(review): the constructor runs the full pipeline (downloads,
        # MSAs, hmmbuild) and then generates the metadata file.
        self.workflow_function()
        self.write_metadata()
def parse_rhea2uniprot(self, rhea2uniprot_path):
    """Parse rhea2uniprot.tsv into {master rhea id: set of uniprot ids}.

    The first (header) line is skipped; blank lines are ignored.
    """
    print('Parsing rhea2uniprot')
    mapping = {}
    with open(rhea2uniprot_path) as handle:
        handle.readline()  # header
        for raw in handle:
            raw = raw.strip('\n')
            if not raw:
                continue
            rhea_id, direction, master_id, uniprot_id = raw.split('\t')
            mapping.setdefault(master_id, set()).add(uniprot_id)
    return mapping
def write_metadata(self):
    """Download rhea2xrefs and the BIGG reactions dump, then write
    metadata.tsv mapping each master Rhea id to its cross-references
    (rhea, bigg, enzyme_ec, biocyc, kegg, go).  Both downloads are removed
    afterwards.
    """
    rhea2xrefs_file = f'{self.work_dir}rhea2xrefs.tsv'
    rhea2xrefs_url='https://ftp.expasy.org/databases/rhea/tsv/rhea2xrefs.tsv'
    rhea2bigg_file=f'{self.work_dir}bigg_models_reactions.txt'
    rhea2bigg_url='http://bigg.ucsd.edu/static/namespace/bigg_models_reactions.txt'
    if not os.path.exists(rhea2xrefs_file):
        self.download_file_ftp(rhea2xrefs_url, rhea2xrefs_file)
    if not os.path.exists(rhea2bigg_file):
        self.download_file_ftp(rhea2bigg_url, rhea2bigg_file)
    wanted_db='rhea'
    metadata_file = f'{self.work_dir}metadata.tsv'
    # restricted-mode parse: rhea id -> set of bigg ids
    rhea2bigg=self.parse_bigg(rhea2bigg_file,wanted_dbs=[wanted_db])
    if not os.path.exists(metadata_file):
        print('Parsing rhea2xrefs')
        rhea2ids={}
        with open(rhea2xrefs_file) as file:
            # skip the header line, then read the first data line
            line=file.readline()
            line=file.readline()
            while line:
                line=line.strip('\n')
                if line:
                    rhea_id,direction,master_id,db_id,db_type=line.split('\t')
                    if master_id not in rhea2ids: rhea2ids[master_id]={}
                    # rename Rhea's db labels to the internal db names;
                    # REACTOME and MACIE refs are intentionally dropped
                    if db_type=='EC': db_type='enzyme_ec'
                    elif db_type=='METACYC': db_type='biocyc_reaction'
                    elif db_type=='ECOCYC': db_type='biocyc_reaction'
                    elif db_type=='KEGG_REACTION': db_type='kegg_reaction'
                    elif db_type=='GO':
                        db_type='go'
                        # NOTE(review): str.strip('GO:') strips any of the
                        # characters G/O/: from both ends — fine for numeric
                        # GO ids, but removeprefix('GO:') would be safer.
                        db_id=db_id.strip('GO:')
                    elif db_type=='REACTOME': db_type=None
                    elif db_type=='MACIE': db_type=None
                    if db_type:
                        if db_type not in rhea2ids[master_id]: rhea2ids[master_id][db_type]=set()
                        rhea2ids[master_id][db_type].add(db_id)
                line=file.readline()
        with open(metadata_file,'w+') as file:
            # one tab-separated line per master id: id | rhea:.. bigg:.. db:..
            for main_id in rhea2ids:
                line = [main_id,'|']
                line.append(f'{wanted_db}:{main_id}')
                if main_id in rhea2bigg:
                    for bigg_id in rhea2bigg[main_id]:
                        line.append(f'bigg:{bigg_id}')
                for db_type in rhea2ids[main_id]:
                    for db_id in rhea2ids[main_id][db_type]:
                        line.append(f'{db_type}:{db_id}')
                file.write('\t'.join(line)+'\n')
    os.remove(rhea2xrefs_file)
    os.remove(rhea2bigg_file)
def fasta_writer(self, uniprot_mapping, uncompressed_uniprot_fastas):
    """Append each SwissProt sequence to the fasta of every Rhea master id
    that references it.

    Performance fix: the original scanned every master id for every
    sequence (O(sequences x reactions)); here the mapping is inverted once
    so each sequence costs one dict lookup.  Output files and their
    contents are unchanged.
    """
    # uniprot id -> ordered list of master ids that include it
    inverted = {}
    for main_id, seq_ids in uniprot_mapping.items():
        for seq_id in seq_ids:
            inverted.setdefault(seq_id, []).append(main_id)
    for uniprot_id, sequence in self.read_protein_fasta_generator(uncompressed_uniprot_fastas):
        for main_id in inverted.get(uniprot_id, ()):
            fasta_file = f'{self.fasta_dir}{main_id}.faa'
            with open(fasta_file, 'a+') as file:
                file.write(f'>{uniprot_id}\n{sequence}\n')
def workflow_function(self):
    """Download SwissProt fastas and the rhea2uniprot mapping, split the
    sequences per Rhea reaction, then align, build and merge the HMMs."""
    uniprot_fastas_url = 'https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta.gz'
    compressed_uniprot_fastas = f'{self.work_dir}uniprot_file.xml.gz'
    uncompressed_uniprot_fastas = f'{self.work_dir}uniprot_file.xml'
    rhea2uniprot_url = 'https://ftp.expasy.org/databases/rhea/tsv/rhea2uniprot.tsv'
    rhea2uniprot_file = f'{self.work_dir}rhea2uniprot.tsv'
    hmm_file = f'{self.work_dir}uniprot_rhea.hmm'
    if not os.path.exists(compressed_uniprot_fastas) or not os.path.exists(uncompressed_uniprot_fastas):
        self.download_file_ftp(uniprot_fastas_url, compressed_uniprot_fastas)
    if not os.path.exists(rhea2uniprot_file):
        self.download_file_ftp(rhea2uniprot_url, rhea2uniprot_file)
    if not os.path.exists(uncompressed_uniprot_fastas):
        self.unpack_gz(compressed_uniprot_fastas, uncompressed_uniprot_fastas)
    if not os.listdir(self.fasta_dir):
        rhea_uniprot = self.parse_rhea2uniprot(rhea2uniprot_file)
        self.fasta_writer(rhea_uniprot, uncompressed_uniprot_fastas)
    self.launch_fastas_msa()
    self.launch_aln_hmmer()
    self.merge_profiles(output_file=hmm_file)
    print(f'Finished generating {hmm_file}')
class Reference_Generator_Reactome(Reference_Generator):
    """Builds a DIAMOND reference of Reactome-mapped SwissProt sequences."""
    def __init__(self,work_dir,remove_files,min_seqs,number_cores):
        Reference_Generator.__init__(self,work_dir=work_dir,remove_files=remove_files,min_seqs=min_seqs,number_cores=number_cores)
        # NOTE(review): the full workflow runs from the constructor.
        self.workflow_function()
def parse_reactome2uniprot(self, reactome2uniprot_path):
    """Parse UniProt2ReactomeReactions.txt into
    {uniprot id: set of reactome reaction ids}.

    No header line is expected; blank lines are ignored; only the first
    two columns are used.
    """
    print('Parsing reactome2uniprot')
    mapping = {}
    with open(reactome2uniprot_path) as handle:
        for raw in handle:
            raw = raw.strip('\n')
            if not raw:
                continue
            columns = raw.split('\t')
            uniprot_id, master_id = columns[0], columns[1]
            mapping.setdefault(uniprot_id, set()).add(master_id)
    return mapping
def write_metadata(self, reactome_uniprot):
    """Write metadata.tsv linking each uniprot id to its Reactome reaction
    ids (plus BIGG cross-references), then delete the BIGG dump."""
    bigg2refs_file = f'{self.work_dir}bigg_models_reactions.txt'
    bigg2refs_file_url = 'http://bigg.ucsd.edu/static/namespace/bigg_models_reactions.txt'
    if not os.path.exists(bigg2refs_file):
        self.download_file_ftp(bigg2refs_file_url, bigg2refs_file)
    wanted_db = 'reactome'
    metadata_file = f'{self.work_dir}metadata.tsv'
    bigg_metadata = self.parse_bigg(bigg2refs_file, wanted_dbs=[wanted_db])
    if not os.path.exists(metadata_file):
        with open(metadata_file, 'w+') as handle:
            for uniprot_id, reactome_ids in reactome_uniprot.items():
                fields = [uniprot_id, '|']
                for reactome_id in reactome_ids:
                    fields.append(f'reactome:{reactome_id}')
                    fields.extend(f'bigg:{db_id}' for db_id in bigg_metadata.get(reactome_id, ()))
                handle.write('\t'.join(fields) + '\n')
    os.remove(bigg2refs_file)
def merge_faa(self, main_fasta):
    """Append every sequence from every fasta in the fasta dir into
    <main_fasta>.

    Bug fixed: the original passed the bare directory-entry name to
    read_protein_fasta_generator instead of its full path, so the reads
    only worked if the cwd happened to be the fasta folder.
    """
    fasta_folder = f'{self.fasta_dir}{SPLITTER}'
    with open(main_fasta, 'a+') as file:
        for fasta in os.listdir(fasta_folder):
            fasta_path = f'{fasta_folder}{fasta}'
            all_sequences = self.read_protein_fasta_generator(fasta_path)
            for seq_id, protein_sequence in all_sequences:
                outline = f'>{seq_id}\n{protein_sequence}\n'
                file.write(outline)
def fasta_writer(self, fasta_path, uniprot_mapping, uncompressed_uniprot_fastas):
    """Write to <fasta_path> (fresh file) only the SwissProt sequences whose
    uniprot id appears in <uniprot_mapping>."""
    with open(fasta_path, 'w+') as handle:
        for uniprot_id, sequence in self.read_protein_fasta_generator(uncompressed_uniprot_fastas):
            if uniprot_id in uniprot_mapping:
                handle.write(f'>{uniprot_id}\n{sequence}\n')
def workflow_function(self):
    """Download SwissProt plus the Reactome mapping, build reactome.faa of
    mapped sequences, compile a DIAMOND db, write metadata and clean up the
    temporary downloads.

    Bug fixed: reactome_uniprot was only assigned inside the
    'fasta_dir is empty' branch, so a re-run with existing fastas crashed
    with NameError at write_metadata().  The mapping is now parsed
    unconditionally (it is cheap relative to the downloads).
    """
    uniprot_fastas_url = 'https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta.gz'
    compressed_uniprot_fastas = f'{self.work_dir}uniprot_file.faa.gz'
    uncompressed_uniprot_fastas = f'{self.work_dir}uniprot_file.faa'
    reactome2uniprot_url = 'https://reactome.org/download/current/UniProt2ReactomeReactions.txt'
    reactome2uniprot_file = f'{self.work_dir}UniProt2ReactomeReactions.txt'
    dmnd_file = f'{self.work_dir}reactome'
    fasta_path = f'{self.work_dir}reactome.faa'
    if not os.path.exists(compressed_uniprot_fastas) or not os.path.exists(uncompressed_uniprot_fastas):
        self.download_file_ftp(uniprot_fastas_url, compressed_uniprot_fastas)
    if not os.path.exists(reactome2uniprot_file):
        self.download_file_ftp(reactome2uniprot_url, reactome2uniprot_file)
    if not os.path.exists(uncompressed_uniprot_fastas):
        self.unpack_gz(compressed_uniprot_fastas, uncompressed_uniprot_fastas)
    # needed by both fasta_writer() and write_metadata(): parse it always
    reactome_uniprot = self.parse_reactome2uniprot(reactome2uniprot_file)
    if not os.listdir(self.fasta_dir):
        self.fasta_writer(fasta_path, reactome_uniprot, uncompressed_uniprot_fastas)
    diamond_path = f'{self.work_dir}diamond'
    if not os.path.exists(diamond_path):
        self.download_diamond()
        dmnd_command = f'{diamond_path} makedb --in {fasta_path} -d {dmnd_file}'
        subprocess.run(dmnd_command.split())
        self.write_metadata(reactome_uniprot)
        os.remove(diamond_path)
        print(f'Finished generating {dmnd_file}.dmnd')
    os.remove(reactome2uniprot_file)
    os.remove(compressed_uniprot_fastas)
    os.remove(uncompressed_uniprot_fastas)
class Reference_Generator_BIGG(Reference_Generator,Web_Connector):
    """Builds a DIAMOND reference from BIGG model genes fetched over the
    BIGG HTTP API."""
    def __init__(self,work_dir,remove_files,min_seqs,number_cores):
        Reference_Generator.__init__(self,work_dir=work_dir,remove_files=remove_files,min_seqs=min_seqs,number_cores=number_cores)
        Web_Connector.__init__(self)
        # shared list collecting results produced by the worker processes
        self.mp_results = self.manager.list()
        self.workflow_function()
def get_all_models(self):
    """Return the set of every model id exposed by the BIGG API."""
    json_page = self.get_url_json('http://bigg.ucsd.edu/api/v2/models')
    return {entry['bigg_id'] for entry in json_page['results']}
def get_genes_model(self, model_id):
    """Return the set of gene ids belonging to one BIGG model."""
    json_page = self.get_url_json(f'http://bigg.ucsd.edu/api/v2/models/{model_id}/genes')
    return {entry['bigg_id'] for entry in json_page['results']}
def get_gene_info(self, model_id, gene_id):
    """Fetch one gene's record from the BIGG API.

    Returns [gene_id, protein_sequence, dna_sequence, set of reaction bigg
    ids], or None when the gene carries no sequence at all.
    """
    url = f'http://bigg.ucsd.edu/api/v2/models/{model_id}/genes/{gene_id}'
    json_page = self.get_url_json(url)
    protein_sequence = json_page['protein_sequence']
    dna_sequence = json_page['dna_sequence']
    if not protein_sequence and not dna_sequence:
        return None
    reactions_bigg = {entry['bigg_id'] for entry in json_page['reactions']}
    return [gene_id, protein_sequence, dna_sequence, reactions_bigg]
def gene_info_worker_function(self, queue, master_pid):
    """Worker loop: pop (model_id, gene_id) pairs until the None sentinel,
    collecting non-empty gene records into self.mp_results."""
    for record in iter(lambda: queue.pop(0), None):
        model_id, gene_id = record
        gene_info = self.get_gene_info(model_id, gene_id)
        if gene_info:
            self.mp_results.append(gene_info)
def launch_reaction_info_retrieval(self, model_id, genes_list):
    """Queue every gene of <model_id>, run the workers, then yield the
    collected gene records while draining self.mp_results."""
    self.queue.extend([model_id, gene_id] for gene_id in genes_list)
    self.processes_handler(self.gene_info_worker_function)
    while self.mp_results:
        yield self.mp_results.pop(0)
def export_to_fasta(self, model_id, gene_id, protein_sequence, dna_sequence):
    """Append the gene's sequences to the model's fasta files:
    .faa_pre for protein (pre-merge) and .fna for DNA."""
    targets = (
        (protein_sequence, f'{self.fasta_dir}{model_id}.faa_pre'),
        (dna_sequence, f'{self.fasta_dir}{model_id}.fna'),
    )
    for sequence, path in targets:
        if sequence:
            with open(path, 'a+') as handle:
                handle.write(f'>{gene_id}\n{sequence}\n')
def write_metadata(self, bigg_metadata, metadata_file, sequences, prodigal_proteins):
    """Append one metadata line per sequence, mapping it back to its BIGG
    reactions and their cross-references.

    Prodigal-predicted ids carry a trailing _<n> suffix that is stripped to
    recover the original gene id before the lookup; ids with no known
    reactions are reported and skipped.
    """
    with open(metadata_file, 'a+') as handle:
        for seq_id in sequences:
            if seq_id in prodigal_proteins:
                # drop the _<n> suffix added by prodigal
                baseline_seq_id = '_'.join(seq_id.split('_')[0:-1])
            else:
                baseline_seq_id = seq_id
            if baseline_seq_id not in self.genes_reactions:
                print(f'Seq missing {baseline_seq_id}')
                continue
            fields = [baseline_seq_id, '|']
            for reaction_id in self.genes_reactions[baseline_seq_id]:
                fields.append(f'bigg_reaction:{reaction_id}')
                for db, db_ids in bigg_metadata.get(reaction_id, {}).items():
                    fields.extend(f'{db}:{db_id}' for db_id in db_ids)
            handle.write('\t'.join(fields) + '\n')
def merge_faa(self):
    '''
    Merge the two per-model fastas: one holds the protein sequences
    extracted from BIGG (.faa_pre), the other the proteins predicted with
    prodigal (.faa_pro).  Per gene, the BIGG protein is preferred; when it
    is missing the prodigal prediction is used instead.  Also writes the
    per-sequence metadata while iterating.
    '''
    bigg_file=f'{self.work_dir}bigg_models_reactions.txt'
    bigg_url='http://bigg.ucsd.edu/static/namespace/bigg_models_reactions.txt'
    if not os.path.exists(bigg_file):
        self.download_file_ftp(bigg_url, bigg_file)
    metadata_file = f'{self.work_dir}metadata.tsv'
    # full-mode parse: bigg reaction id -> {db: set of external ids}
    bigg_metadata=self.parse_bigg(bigg_file)
    fasta_folder=f'{self.fasta_dir}{SPLITTER}'
    # every model that produced a DNA fasta
    model_list=[i.replace('.fna','') for i in os.listdir(fasta_folder) if i.endswith('.fna')]
    for model_id in model_list:
        faa_pre=f'{fasta_folder}{model_id}.faa_pre'
        faa_prodigal=f'{fasta_folder}{model_id}.faa_pro'
        # NOTE(review): fna is assigned but never used below
        fna=f'{fasta_folder}{model_id}.fna'
        all_pre_proteins=self.read_protein_fasta(faa_pre)
        all_prodigal_proteins=self.read_protein_fasta(faa_prodigal)
        all_sequences=set(list(all_pre_proteins.keys())+list(all_prodigal_proteins.keys()))
        self.write_metadata(bigg_metadata,metadata_file,all_sequences,all_prodigal_proteins)
        for seq_id in all_sequences:
            fasta_path_aa = f'{self.fasta_dir}{SPLITTER}{model_id}.faa'
            protein_sequence=None
            #if bigg provides protein sequences we use them
            if seq_id in all_pre_proteins:
                protein_sequence=all_pre_proteins[seq_id]
            #if bigg doesnt, we predict with prodigal
            elif seq_id not in all_pre_proteins and seq_id in all_prodigal_proteins:
                protein_sequence=all_prodigal_proteins[seq_id]
            #if there is not protein and dna sequence, we just report it
            else:
                print(f'Did not manage to export sequence {seq_id} for model {model_id}')
            if protein_sequence:
                with open(fasta_path_aa, 'a+') as file:
                    outline = f'>{seq_id}\n{protein_sequence}\n'
                    file.write(outline)
def export_bigg_faa(self):
    '''
    Concatenate every per-model .faa into one bigg.faa in the work dir.
    '''
    fasta_folder = f'{self.fasta_dir}{SPLITTER}'
    bigg_path = f'{self.work_dir}bigg.faa'
    with open(bigg_path, 'a+') as handle:
        for faa in os.listdir(fasta_folder):
            if not faa.endswith('.faa'):
                continue
            for seq_id, protein_sequence in self.read_protein_fasta_generator(f'{fasta_folder}{faa}'):
                handle.write(f'>{seq_id}\n{protein_sequence}\n')
def fasta_writer(self):
    """Fetch every BIGG model's genes, export their sequences, record the
    gene -> reactions mapping, then run prodigal and merge the fastas."""
    self.genes_reactions = {}
    for model_id in self.get_all_models():
        print(f'Getting info for model {model_id}')
        genes_list = self.get_genes_model(model_id)
        for gene_id, protein_sequence, dna_sequence, reactions_bigg in self.launch_reaction_info_retrieval(model_id, genes_list):
            self.export_to_fasta(model_id, gene_id, protein_sequence, dna_sequence)
            self.genes_reactions.setdefault(gene_id, set()).update(reactions_bigg)
    self.run_prodigal()
    self.merge_faa()
    self.export_bigg_faa()
def workflow_function(self):
    """Produce bigg.faa, compile it into a DIAMOND database and clean up
    the downloaded helpers."""
    dmnd_file = f'{self.work_dir}bigg'
    fasta_path = f'{self.work_dir}bigg.faa'
    bigg_file = f'{self.work_dir}bigg_models_reactions.txt'
    self.fasta_writer()
    self.download_diamond()
    diamond_path = f'{self.work_dir}diamond'
    subprocess.run(f'{diamond_path} makedb --in {fasta_path} -d {dmnd_file}'.split())
    print(f'Finished generating {dmnd_file}')
    os.remove(diamond_path)
    # bigg_models_reactions.txt was downloaded during merge_faa()
    os.remove(bigg_file)
if __name__ == '__main__':
    print('Executing command:\n', ' '.join(argv))
    parser = argparse.ArgumentParser(description='This is a functional annotation reference generator tool\n',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-db', '--database', help='[required]\tClustering ID', choices=['ec', 'rhea', 'reactome', 'bigg_genes', 'swissprot', 'trembl'])
    parser.add_argument('-o', '--output_folder', help='[required]\tDirectory to save HMMs in')
    parser.add_argument('-c', '--number_cores', help='[optional]\tNumber of cores to use')
    parser.add_argument('-ms', '--min_seqs', help='[optional]\tMinimum sequences per HMM. Default is 10')
    parser.add_argument('-rf', '--remove_files', action='store_true', help='[optional]\tuse this to remove files from previous runs.')
    args = parser.parse_args()
    database = args.database
    output_folder = args.output_folder
    # min_seqs arrives as a string; fall back to 10 when absent
    min_seqs = int(args.min_seqs) if args.min_seqs else 10
    # NOTE(review): number_cores is passed through as a string — confirm
    # downstream converts it before use
    number_cores = args.number_cores
    remove_files = args.remove_files
    if output_folder and not os.path.isabs(output_folder):
        output_folder = os.getcwd() + SPLITTER + output_folder
    if not output_folder:
        print('Missing output folder!')
    elif database == 'rhea':
        updater = Reference_Generator_Rhea(output_folder, remove_files, min_seqs, number_cores)
    elif database == 'reactome':
        updater = Reference_Generator_Reactome(output_folder, remove_files, min_seqs, number_cores)
    elif database in ('swissprot', 'trembl', 'ec'):
        # the three UniProt-backed modes share one class, parameterized by db
        updater = Reference_Generator_Uniprot(output_folder, remove_files, min_seqs, number_cores, db=database)
    elif database == 'bigg_genes':
        updater = Reference_Generator_BIGG(output_folder, remove_files, min_seqs, number_cores)
    else:
        print('Command is not valid')
|
mtping2.py | import subprocess
import threading
class Ping:
    """Callable that pings one host twice and prints a colored up/down line."""
    def __init__(self, host):
        self.host = host
    def __call__(self):
        # Bug fix: the original ran "ping -c2 HOST &> /dev/null" through the
        # shell.  '&>' is a bashism — POSIX sh parses it as "background the
        # ping, then truncate /dev/null", so the call returned 0 immediately
        # and EVERY host was reported up.  Using an argument list avoids the
        # shell entirely (and the injection surface of interpolated hosts).
        rc = subprocess.call(
            ['ping', '-c2', self.host],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        if rc == 0:
            print('\033[32;1m%s:up\033[0m' % self.host)
        else:
            print('\033[31;1m%s:down\033[0m' % self.host)
if __name__ == '__main__':
    # One probe thread per address in the /24; each thread runs the Ping
    # instance's __call__.
    hosts = ['172.40.58.%s' % i for i in range(1, 255)]
    for host in hosts:
        threading.Thread(target=Ping(host)).start()
|
test_gc.py | import unittest
import unittest.mock
from test.support import (verbose, refcount_test, run_unittest,
cpython_only, start_threads,
temp_dir, TESTFN, unlink,
import_module)
from test.support.script_helper import assert_python_ok, make_script
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
    from _testcapi import with_tp_del
except ImportError:
    def with_tp_del(cls):
        # Fallback when _testcapi is unavailable: any attempt to instantiate
        # a decorated class fails loudly instead of silently running without
        # the tp_del behaviour the test depends on.
        class C(object):
            def __new__(cls, *args, **kwargs):
                raise TypeError('requires _testcapi.with_tp_del')
        return C
try:
    from _testcapi import ContainerNoGC
except ImportError:
    # Tests needing the ContainerNoGC type check for None and skip themselves.
    ContainerNoGC = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
    # An instance holds a reference to itself, so it becomes cyclic trash
    # as soon as it is unreachable (support for the bug-1055820 tests).
    def __init__(self, i):
        self.i = i
        self.loop = self
class GC_Detector(object):
    # Create an instance I. Then gc hasn't happened again so long as
    # I.gc_happened is false.
    def __init__(self):
        self.gc_happened = False
        def it_happened(ignored):
            # weakref callback: fires once the cyclic trash below is collected
            self.gc_happened = True
        # Create a piece of cyclic trash that triggers it_happened when
        # gc collects it.
        self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
    """Create a reference cycle with multiple __del__ methods.

    An object in a reference cycle will never have zero references,
    and so must be garbage collected.  If one or more objects in the
    cycle have __del__ methods, the gc refuses to guess an order,
    and leaves the cycle uncollected."""
    def __init__(self, partner=None):
        # Pair up: constructing one object without a partner builds its twin,
        # closing the two-object cycle.
        if partner is None:
            self.partner = Uncollectable(partner=self)
        else:
            self.partner = partner
    def __tp_del__(self):
        # mapped onto tp_del by the with_tp_del decorator (legacy finalizer)
        pass
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
    # Trust the compiler flags when the build config records them.
    BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
    # Usually, sys.gettotalrefcount() is only present if Python has been
    # compiled in debug mode. If it's missing, expect that Python has
    # been released in release mode: with NDEBUG defined.
    BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
    # A list that contains itself is cyclic trash: exactly one object must
    # be found by the collector once the last name is dropped.
    l = []
    l.append(l)
    gc.collect()
    del l
    self.assertEqual(gc.collect(), 1)
def test_dict(self):
    # A dict holding itself as a value forms a one-object cycle.
    d = {}
    d[1] = d
    gc.collect()
    del d
    self.assertEqual(gc.collect(), 1)
def test_tuple(self):
    # since tuples are immutable we close the loop with a list
    # (both objects — the tuple and the list — are collected: count 2)
    l = []
    t = (l,)
    l.append(t)
    gc.collect()
    del t
    del l
    self.assertEqual(gc.collect(), 2)
def test_class(self):
    # A class referencing itself through an attribute is cyclic; the whole
    # class apparatus is collected, so just require a nonzero count.
    class A:
        pass
    A.a = A
    gc.collect()
    del A
    self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
    # Even without an explicit self-reference, a (new-style) class object
    # participates in cycles (e.g. via its __mro__/__dict__ machinery).
    class A(object):
        pass
    gc.collect()
    del A
    self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
    # An instance with a self-referencing attribute is cyclic trash.
    class A:
        pass
    a = A()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
    # Same self-reference cycles as test_instance, but with new-style
    # classes, including a multiple-inheritance subclass of list.
    class A(object):
        pass
    a = A()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
    class B(list):
        pass
    class C(B, A):
        pass
    a = C()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
    del B, C
    self.assertNotEqual(gc.collect(), 0)
    # class-level cycle: A holds an instance of itself
    A.a = A()
    del A
    self.assertNotEqual(gc.collect(), 0)
    # nothing else should be left over afterwards
    self.assertEqual(gc.collect(), 0)
def test_method(self):
    # Tricky: self.__init__ is a bound method, it references the instance.
    class A:
        def __init__(self):
            self.init = self.__init__
    a = A()
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
    # A() is uncollectable if it is part of a cycle, make sure it shows up
    # in gc.garbage.
    @with_tp_del
    class A:
        def __tp_del__(self): pass
    class B:
        pass
    a = A()
    a.a = a
    id_a = id(a)
    b = B()
    b.b = b
    gc.collect()
    del a
    del b
    self.assertNotEqual(gc.collect(), 0)
    # break the cycle by hand so the garbage can actually be freed
    for obj in gc.garbage:
        if id(obj) == id_a:
            del obj.a
            break
    else:
        self.fail("didn't find obj in garbage (finalizer)")
    gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
    # A() is uncollectable if it is part of a cycle, make sure it shows up
    # in gc.garbage.  (New-style-class variant of test_legacy_finalizer.)
    @with_tp_del
    class A(object):
        def __tp_del__(self): pass
    class B(object):
        pass
    a = A()
    a.a = a
    id_a = id(a)
    b = B()
    b.b = b
    gc.collect()
    del a
    del b
    self.assertNotEqual(gc.collect(), 0)
    # break the cycle by hand so the garbage can actually be freed
    for obj in gc.garbage:
        if id(obj) == id_a:
            del obj.a
            break
    else:
        self.fail("didn't find obj in garbage (finalizer)")
    gc.garbage.remove(obj)
def test_function(self):
    # Tricky: f -> d -> f, code should call d.clear() after the exec to
    # break the cycle.
    d = {}
    exec("def f(): pass\n", d)
    gc.collect()
    del d
    self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
    # A frame that captures itself via sys._getframe becomes cyclic trash
    # once f() returns.
    def f():
        frame = sys._getframe()
    gc.collect()
    f()
    self.assertEqual(gc.collect(), 1)
    def test_saveall(self):
        """With DEBUG_SAVEALL set, collected cyclic garbage is saved in gc.garbage."""
        # Verify that cyclic garbage like lists show up in gc.garbage if the
        # SAVEALL option is enabled.
        # First make sure we don't save away other stuff that just happens to
        # be waiting for collection.
        gc.collect()
        # if this fails, someone else created immortal trash
        self.assertEqual(gc.garbage, [])
        L = []
        L.append(L)
        id_L = id(L)
        debug = gc.get_debug()
        gc.set_debug(debug | gc.DEBUG_SAVEALL)
        del L
        gc.collect()
        gc.set_debug(debug)
        # Exactly the self-referencing list must have been saved.
        self.assertEqual(len(gc.garbage), 1)
        obj = gc.garbage.pop()
        self.assertEqual(id(obj), id_L)
    def test_del(self):
        """Deallocating an object with __del__ must not break when __del__
        itself can trigger a collection (threshold set to 1)."""
        # __del__ methods can trigger collection, make this to happen
        thresholds = gc.get_threshold()
        gc.enable()
        gc.set_threshold(1)
        class A:
            def __del__(self):
                # dir() allocates, which can trigger gc at threshold 1.
                dir(self)
        a = A()
        del a
        gc.disable()
        gc.set_threshold(*thresholds)
    def test_del_newclass(self):
        """Same as test_del but with a new-style class."""
        # __del__ methods can trigger collection, make this to happen
        thresholds = gc.get_threshold()
        gc.enable()
        gc.set_threshold(1)
        class A(object):
            def __del__(self):
                # dir() allocates, which can trigger gc at threshold 1.
                dir(self)
        a = A()
        del a
        gc.disable()
        gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
    @refcount_test
    def test_get_count(self):
        """gc.get_count() gen-0 counter grows as objects are allocated."""
        gc.collect()
        a, b, c = gc.get_count()
        x = []
        d, e, f = gc.get_count()
        self.assertEqual((b, c), (0, 0))
        self.assertEqual((e, f), (0, 0))
        # This is less fragile than asserting that a equals 0.
        self.assertLess(a, 5)
        # Between the two calls to get_count(), at least one object was
        # created (the list).
        self.assertGreater(d, a)
    @refcount_test
    def test_collect_generations(self):
        """collect(N) promotes survivors into generation N+1, visible in get_count()."""
        gc.collect()
        # This object will "trickle" into generation N + 1 after
        # each call to collect(N)
        x = []
        gc.collect(0)
        # x is now in gen 1
        a, b, c = gc.get_count()
        gc.collect(1)
        # x is now in gen 2
        d, e, f = gc.get_count()
        gc.collect(2)
        # x is now in gen 3
        g, h, i = gc.get_count()
        # We don't check a, d, g since their exact values depends on
        # internal implementation details of the interpreter.
        self.assertEqual((b, c), (1, 0))
        self.assertEqual((e, f), (0, 1))
        self.assertEqual((h, i), (0, 0))
    def test_trashcan(self):
        """Deeply nested containers whose deallocation interleaves with gc
        (via __del__ forcing collections) must not crash the trashcan."""
        class Ouch:
            n = 0
            def __del__(self):
                Ouch.n = Ouch.n + 1
                # Force a full collection every 17th deallocation.
                if Ouch.n % 17 == 0:
                    gc.collect()
        # "trashcan" is a hack to prevent stack overflow when deallocating
        # very deeply nested tuples etc. It works in part by abusing the
        # type pointer and refcount fields, and that can yield horrible
        # problems when gc tries to traverse the structures.
        # If this test fails (as it does in 2.0, 2.1 and 2.2), it will
        # most likely die via segfault.
        # Note: In 2.3 the possibility for compiling without cyclic gc was
        # removed, and that in turn allows the trashcan mechanism to work
        # via much simpler means (e.g., it never abuses the type pointer or
        # refcount fields anymore). Since it's much less likely to cause a
        # problem now, the various constants in this expensive (we force a lot
        # of full collections) test are cut back from the 2.2 version.
        gc.enable()
        N = 150
        for count in range(2):
            t = []
            for i in range(N):
                t = [t, Ouch()]
            u = []
            for i in range(N):
                u = [u, Ouch()]
            v = {}
            for i in range(N):
                v = {1: v, 2: Ouch()}
        gc.disable()
    def test_trashcan_threads(self):
        """bpo-13992: the trashcan mechanism must be thread-safe -- every
        constructed C instance must eventually have its __del__ run."""
        # Issue #13992: trashcan mechanism should be thread-safe
        NESTING = 60
        N_THREADS = 2
        def sleeper_gen():
            """A generator that releases the GIL when closed or dealloc'ed."""
            try:
                yield
            finally:
                time.sleep(0.000001)
        class C(list):
            # Appending to a list is atomic, which avoids the use of a lock.
            inits = []
            dels = []
            def __init__(self, alist):
                self[:] = alist
                C.inits.append(None)
            def __del__(self):
                # This __del__ is called by subtype_dealloc().
                C.dels.append(None)
                # `g` will release the GIL when garbage-collected. This
                # helps assert subtype_dealloc's behaviour when threads
                # switch in the middle of it.
                g = sleeper_gen()
                next(g)
                # Now that __del__ is finished, subtype_dealloc will proceed
                # to call list_dealloc, which also uses the trashcan mechanism.
        def make_nested():
            """Create a sufficiently nested container object so that the
            trashcan mechanism is invoked when deallocating it."""
            x = C([])
            for i in range(NESTING):
                x = [C([x])]
            del x
        def run_thread():
            """Exercise make_nested() in a loop."""
            while not exit:
                make_nested()
        old_switchinterval = sys.getswitchinterval()
        # Very small switch interval to maximize thread interleaving.
        sys.setswitchinterval(1e-5)
        try:
            exit = []
            threads = []
            for i in range(N_THREADS):
                t = threading.Thread(target=run_thread)
                threads.append(t)
            with start_threads(threads, lambda: exit.append(1)):
                time.sleep(1.0)
        finally:
            sys.setswitchinterval(old_switchinterval)
        gc.collect()
        # Each construction must be matched by exactly one destruction.
        self.assertEqual(len(C.inits), len(C.dels))
    def test_boom(self):
        """A trash cycle whose __getattr__ mutates the object graph must not
        crash the collector (historical pre-2.3b1 crash)."""
        class Boom:
            def __getattr__(self, someattribute):
                del self.attr
                raise AttributeError
        a = Boom()
        b = Boom()
        a.attr = b
        b.attr = a
        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        # a<->b are in a trash cycle now. Collection will invoke
        # Boom.__getattr__ (to see whether a and b have __del__ methods), and
        # __getattr__ deletes the internal "attr" attributes as a side effect.
        # That causes the trash cycle to get reclaimed via refcounts falling to
        # 0, thus mutating the trash graph as a side effect of merely asking
        # whether __del__ exists. This used to (before 2.3b1) crash Python.
        # Now __getattr__ isn't called.
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)
    def test_boom2(self):
        """Variant of test_boom where __getattr__ only mutates the graph on
        its second invocation."""
        class Boom2:
            def __init__(self):
                self.x = 0
            def __getattr__(self, someattribute):
                self.x += 1
                if self.x > 1:
                    del self.attr
                raise AttributeError
        a = Boom2()
        b = Boom2()
        a.attr = b
        b.attr = a
        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        # Much like test_boom(), except that __getattr__ doesn't break the
        # cycle until the second time gc checks for __del__. As of 2.3b1,
        # there isn't a second time, so this simply cleans up the trash cycle.
        # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
        # reclaimed this way.
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)
    def test_boom_new(self):
        """New-style-class version of test_boom."""
        # boom__new and boom2_new are exactly like boom and boom2, except use
        # new-style classes.
        class Boom_New(object):
            def __getattr__(self, someattribute):
                del self.attr
                raise AttributeError
        a = Boom_New()
        b = Boom_New()
        a.attr = b
        b.attr = a
        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)
    def test_boom2_new(self):
        """New-style-class version of test_boom2."""
        class Boom2_New(object):
            def __init__(self):
                self.x = 0
            def __getattr__(self, someattribute):
                self.x += 1
                if self.x > 1:
                    del self.attr
                raise AttributeError
        a = Boom2_New()
        b = Boom2_New()
        a.attr = b
        b.attr = a
        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
    def test_is_tracked(self):
        """gc.is_tracked(): atomic built-ins are untracked; user-defined
        objects and mutable containers are tracked."""
        # Atomic built-in types are not tracked, user-defined objects and
        # mutable containers are.
        # NOTE: types with special optimizations (e.g. tuple) have tests
        # in their own test files instead.
        self.assertFalse(gc.is_tracked(None))
        self.assertFalse(gc.is_tracked(1))
        self.assertFalse(gc.is_tracked(1.0))
        self.assertFalse(gc.is_tracked(1.0 + 5.0j))
        self.assertFalse(gc.is_tracked(True))
        self.assertFalse(gc.is_tracked(False))
        self.assertFalse(gc.is_tracked(b"a"))
        self.assertFalse(gc.is_tracked("a"))
        self.assertFalse(gc.is_tracked(bytearray(b"a")))
        self.assertFalse(gc.is_tracked(type))
        self.assertFalse(gc.is_tracked(int))
        self.assertFalse(gc.is_tracked(object))
        self.assertFalse(gc.is_tracked(object()))
        class UserClass:
            pass
        class UserInt(int):
            pass
        # Base class is object; no extra fields.
        class UserClassSlots:
            __slots__ = ()
        # Base class is fixed size larger than object; no extra fields.
        class UserFloatSlots(float):
            __slots__ = ()
        # Base class is variable size; no extra fields.
        class UserIntSlots(int):
            __slots__ = ()
        self.assertTrue(gc.is_tracked(gc))
        self.assertTrue(gc.is_tracked(UserClass))
        self.assertTrue(gc.is_tracked(UserClass()))
        self.assertTrue(gc.is_tracked(UserInt()))
        self.assertTrue(gc.is_tracked([]))
        self.assertTrue(gc.is_tracked(set()))
        self.assertTrue(gc.is_tracked(UserClassSlots()))
        self.assertTrue(gc.is_tracked(UserFloatSlots()))
        self.assertTrue(gc.is_tracked(UserIntSlots()))
    def test_is_finalized(self):
        """gc.is_finalized() is true only after an object's __del__ has run
        (here: a resurrected instance)."""
        # Objects not tracked by the gc always return False.
        self.assertFalse(gc.is_finalized(3))
        storage = []
        class Lazarus:
            def __del__(self):
                # Resurrect the instance by stashing it in 'storage'.
                storage.append(self)
        lazarus = Lazarus()
        self.assertFalse(gc.is_finalized(lazarus))
        del lazarus
        gc.collect()
        lazarus = storage.pop()
        self.assertTrue(gc.is_finalized(lazarus))
    def test_bug1055820b(self):
        """bug 1055820 (temp2b.py): a weakref callback materializing a strong
        reference must not observe an instance whose dict gc already cleared.
        C1055820 is a module-level helper class (defined outside this chunk)."""
        # Corresponds to temp2b.py in the bug report.
        ouch = []
        def callback(ignored):
            ouch[:] = [wr() for wr in WRs]
        Cs = [C1055820(i) for i in range(2)]
        WRs = [weakref.ref(c, callback) for c in Cs]
        c = None
        gc.collect()
        self.assertEqual(len(ouch), 0)
        # Make the two instances trash, and collect again. The bug was that
        # the callback materialized a strong reference to an instance, but gc
        # cleared the instance's dict anyway.
        Cs = None
        gc.collect()
        self.assertEqual(len(ouch), 2)  # else the callbacks didn't run
        for x in ouch:
            # If the callback resurrected one of these guys, the instance
            # would be damaged, with an empty __dict__.
            self.assertEqual(x, None)
    def test_bug21435(self):
        """Regression test for issue 21435 (segfault in finalize_garbage
        depending on the ordering of cyclic trash)."""
        # This is a poor test - its only virtue is that it happened to
        # segfault on Tim's Windows box before the patch for 21435 was
        # applied.  That's a nasty bug relying on specific pieces of cyclic
        # trash appearing in exactly the right order in finalize_garbage()'s
        # input list.
        # But there's no reliable way to force that order from Python code,
        # so over time chances are good this test won't really be testing much
        # of anything anymore.  Still, if it blows up, there's _some_
        # problem ;-)
        gc.collect()
        class A:
            pass
        class B:
            def __init__(self, x):
                self.x = x
            def __del__(self):
                self.attr = None
        def do_work():
            a = A()
            b = B(A())
            a.attr = b
            b.attr = a
        do_work()
        gc.collect() # this blows up (bad C pointer) when it fails
    @cpython_only
    def test_garbage_at_shutdown(self):
        """Uncollectable objects remaining at interpreter shutdown must emit a
        ResourceWarning, with verbosity controlled by gc debug flags."""
        import subprocess
        # Note: '%%r' in the template survives the outer %-substitution of the
        # gc.set_debug() argument as '%r'.
        code = """if 1:
            import gc
            import _testcapi
            @_testcapi.with_tp_del
            class X:
                def __init__(self, name):
                    self.name = name
                def __repr__(self):
                    return "<X %%r>" %% self.name
                def __tp_del__(self):
                    pass
            x = X('first')
            x.x = x
            x.y = X('second')
            del x
            gc.set_debug(%s)
        """
        def run_command(code):
            # Run a child interpreter and return its stderr output.
            p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            p.stdout.close()
            p.stderr.close()
            self.assertEqual(p.returncode, 0)
            self.assertEqual(stdout, b"")
            return stderr
        stderr = run_command(code % "0")
        self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
                      b"shutdown; use", stderr)
        self.assertNotIn(b"<X 'first'>", stderr)
        # With DEBUG_UNCOLLECTABLE, the garbage list gets printed
        stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
        self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
                      b"shutdown", stderr)
        self.assertTrue(
            (b"[<X 'first'>, <X 'second'>]" in stderr) or
            (b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
        # With DEBUG_SAVEALL, no additional message should get printed
        # (because gc.garbage also contains normally reclaimable cyclic
        # references, and its elements get printed at runtime anyway).
        stderr = run_command(code % "gc.DEBUG_SAVEALL")
        self.assertNotIn(b"uncollectable objects at shutdown", stderr)
    def test_gc_main_module_at_shutdown(self):
        """A reference cycle through __main__ is collected at shutdown
        (verified by seeing __del__ run in a child interpreter)."""
        # Create a reference cycle through the __main__ module and check
        # it gets collected at interpreter shutdown.
        code = """if 1:
            class C:
                def __del__(self):
                    print('__del__ called')
            l = [C()]
            l.append(l)
            """
        rc, out, err = assert_python_ok('-c', code)
        self.assertEqual(out.strip(), b'__del__ called')
    def test_gc_ordinary_module_at_shutdown(self):
        """Same as test_gc_main_module_at_shutdown, but the cycle lives in an
        imported (non-__main__) module."""
        # Same as above, but with a non-__main__ module.
        with temp_dir() as script_dir:
            module = """if 1:
                class C:
                    def __del__(self):
                        print('__del__ called')
                l = [C()]
                l.append(l)
                """
            code = """if 1:
                import sys
                sys.path.insert(0, %r)
                import gctest
                """ % (script_dir,)
            make_script(script_dir, 'gctest', module)
            rc, out, err = assert_python_ok('-c', code)
            self.assertEqual(out.strip(), b'__del__ called')
    def test_global_del_SystemExit(self):
        """__del__ on a self-referencing global must still run when the
        script terminates via SystemExit."""
        code = """if 1:
            class ClassWithDel:
                def __del__(self):
                    print('__del__ called')
            a = ClassWithDel()
            a.link = a
            raise SystemExit(0)"""
        self.addCleanup(unlink, TESTFN)
        with open(TESTFN, 'w') as script:
            script.write(code)
        rc, out, err = assert_python_ok(TESTFN)
        self.assertEqual(out.strip(), b'__del__ called')
    def test_get_stats(self):
        """gc.get_stats() returns one dict per generation, and per-generation
        collection counters increment on collect(N)."""
        stats = gc.get_stats()
        self.assertEqual(len(stats), 3)
        for st in stats:
            self.assertIsInstance(st, dict)
            self.assertEqual(set(st),
                             {"collected", "collections", "uncollectable"})
            self.assertGreaterEqual(st["collected"], 0)
            self.assertGreaterEqual(st["collections"], 0)
            self.assertGreaterEqual(st["uncollectable"], 0)
        # Check that collection counts are incremented correctly
        if gc.isenabled():
            self.addCleanup(gc.enable)
            gc.disable()
        old = gc.get_stats()
        gc.collect(0)
        new = gc.get_stats()
        self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
        self.assertEqual(new[1]["collections"], old[1]["collections"])
        self.assertEqual(new[2]["collections"], old[2]["collections"])
        gc.collect(2)
        new = gc.get_stats()
        self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
        self.assertEqual(new[1]["collections"], old[1]["collections"])
        self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
    def test_freeze(self):
        """gc.freeze() moves tracked objects to the permanent generation;
        gc.unfreeze() empties it again."""
        gc.freeze()
        self.assertGreater(gc.get_freeze_count(), 0)
        gc.unfreeze()
        self.assertEqual(gc.get_freeze_count(), 0)
    def test_get_objects(self):
        """gc.get_objects(generation=N) tracks an object as collect(N)
        promotes it through the generations."""
        gc.collect()
        l = []
        l.append(l)
        # Freshly created: only visible in generation 0.
        self.assertTrue(
            any(l is element for element in gc.get_objects(generation=0))
        )
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=1))
        )
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=2))
        )
        gc.collect(generation=0)
        # Survived a gen-0 collection: promoted to generation 1.
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=0))
        )
        self.assertTrue(
            any(l is element for element in gc.get_objects(generation=1))
        )
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=2))
        )
        gc.collect(generation=1)
        # Promoted to generation 2.
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=0))
        )
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=1))
        )
        self.assertTrue(
            any(l is element for element in gc.get_objects(generation=2))
        )
        gc.collect(generation=2)
        # Stays in the oldest generation.
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=0))
        )
        self.assertFalse(
            any(l is element for element in gc.get_objects(generation=1))
        )
        self.assertTrue(
            any(l is element for element in gc.get_objects(generation=2))
        )
        del l
        gc.collect()
def test_get_objects_arguments(self):
gc.collect()
self.assertEqual(len(gc.get_objects()),
len(gc.get_objects(generation=None)))
self.assertRaises(ValueError, gc.get_objects, 1000)
self.assertRaises(ValueError, gc.get_objects, -1000)
self.assertRaises(TypeError, gc.get_objects, "1")
self.assertRaises(TypeError, gc.get_objects, 1.234)
    def test_resurrection_only_happens_once_per_object(self):
        """An object resurrected by __del__ must not have __del__ run again
        when it becomes trash a second time (PEP 442 semantics)."""
        class A:  # simple self-loop
            def __init__(self):
                self.me = self
        class Lazarus(A):
            resurrected = 0
            resurrected_instances = []
            def __del__(self):
                Lazarus.resurrected += 1
                Lazarus.resurrected_instances.append(self)
        gc.collect()
        gc.disable()
        # We start with 0 resurrections
        laz = Lazarus()
        self.assertEqual(Lazarus.resurrected, 0)
        # Deleting the instance and triggering a collection
        # resurrects the object
        del laz
        gc.collect()
        self.assertEqual(Lazarus.resurrected, 1)
        self.assertEqual(len(Lazarus.resurrected_instances), 1)
        # Clearing the references and forcing a collection
        # should not resurrect the object again.
        Lazarus.resurrected_instances.clear()
        self.assertEqual(Lazarus.resurrected, 1)
        gc.collect()
        self.assertEqual(Lazarus.resurrected, 1)
        gc.enable()
    def test_resurrection_is_transitive(self):
        """Resurrecting an object must also keep alive everything reachable
        from it (here: its cycle partner)."""
        class Cargo:
            def __init__(self):
                self.me = self
        class Lazarus:
            resurrected_instances = []
            def __del__(self):
                Lazarus.resurrected_instances.append(self)
        gc.collect()
        gc.disable()
        laz = Lazarus()
        cargo = Cargo()
        cargo_id = id(cargo)
        # Create a cycle between cargo and laz
        laz.cargo = cargo
        cargo.laz = laz
        # Drop the references, force a collection and check that
        # everything was resurrected.
        del laz, cargo
        gc.collect()
        self.assertEqual(len(Lazarus.resurrected_instances), 1)
        instance = Lazarus.resurrected_instances.pop()
        self.assertTrue(hasattr(instance, "cargo"))
        self.assertEqual(id(instance.cargo), cargo_id)
        gc.collect()
        gc.enable()
    def test_resurrection_does_not_block_cleanup_of_other_objects(self):
        """Resurrected objects must not be reported as collected, nor prevent
        unrelated trash from being reclaimed in the same pass."""
        # When a finalizer resurrects objects, stats were reporting them as
        # having been collected.  This affected both collect()'s return
        # value and the dicts returned by get_stats().
        N = 100
        class A:  # simple self-loop
            def __init__(self):
                self.me = self
        class Z(A):  # resurrecting __del__
            def __del__(self):
                zs.append(self)
        zs = []
        def getstats():
            # (collected, uncollectable) for the oldest generation.
            d = gc.get_stats()[-1]
            return d['collected'], d['uncollectable']
        gc.collect()
        gc.disable()
        # No problems if just collecting A() instances.
        oldc, oldnc = getstats()
        for i in range(N):
            A()
        t = gc.collect()
        c, nc = getstats()
        self.assertEqual(t, 2*N) # instance object & its dict
        self.assertEqual(c - oldc, 2*N)
        self.assertEqual(nc - oldnc, 0)
        # But Z() is not actually collected.
        oldc, oldnc = c, nc
        Z()
        # Nothing is collected - Z() is merely resurrected.
        t = gc.collect()
        c, nc = getstats()
        self.assertEqual(t, 0)
        self.assertEqual(c - oldc, 0)
        self.assertEqual(nc - oldnc, 0)
        # Z() should not prevent anything else from being collected.
        oldc, oldnc = c, nc
        for i in range(N):
            A()
        Z()
        t = gc.collect()
        c, nc = getstats()
        self.assertEqual(t, 2*N)
        self.assertEqual(c - oldc, 2*N)
        self.assertEqual(nc - oldnc, 0)
        # The A() trash should have been reclaimed already but the
        # 2 copies of Z are still in zs (and the associated dicts).
        oldc, oldnc = c, nc
        zs.clear()
        t = gc.collect()
        c, nc = getstats()
        self.assertEqual(t, 4)
        self.assertEqual(c - oldc, 4)
        self.assertEqual(nc - oldnc, 0)
        gc.enable()
    @unittest.skipIf(ContainerNoGC is None,
                     'requires ContainerNoGC extension type')
    def test_trash_weakref_clear(self):
        """bpo-38006: weakrefs that are themselves trash must be cleared
        before delete_garbage() so their callbacks can never run on
        partially-torn-down objects."""
        # Test that trash weakrefs are properly cleared (bpo-38006).
        #
        # Structure we are creating:
        #
        #   Z <- Y <- A--+--> WZ -> C
        #   ^            |
        #   +--+
        # where:
        #   WZ is a weakref to Z with callback C
        #   Y doesn't implement tp_traverse
        #   A contains a reference to itself, Y and WZ
        #
        # A, Y, Z, WZ are all trash.  The GC doesn't know that Z is trash
        # because Y does not implement tp_traverse.  To show the bug, WZ needs
        # to live long enough so that Z is deallocated before it.  Then, if
        # gcmodule is buggy, when Z is being deallocated, C will run.
        #
        # To ensure WZ lives long enough, we put it in a second reference
        # cycle.  That trick only works due to the ordering of the GC prev/next
        # linked lists.  So, this test is a bit fragile.
        #
        # The bug reported in bpo-38006 is caused because the GC did not
        # clear WZ before starting the process of calling tp_clear on the
        # trash.  Normally, handle_weakrefs() would find the weakref via Z and
        # clear it.  However, since the GC cannot find Z, WR is not cleared and
        # it can execute during delete_garbage().  That can lead to disaster
        # since the callback might tinker with objects that have already had
        # tp_clear called on them (leaving them in possibly invalid states).
        callback = unittest.mock.Mock()
        class A:
            __slots__ = ['a', 'y', 'wz']
        class Z:
            pass
        # setup required object graph, as described above
        a = A()
        a.a = a
        a.y = ContainerNoGC(Z())
        a.wz = weakref.ref(a.y.value, callback)
        # create second cycle to keep WZ alive longer
        wr_cycle = [a.wz]
        wr_cycle.append(wr_cycle)
        # ensure trash unrelated to this test is gone
        gc.collect()
        gc.disable()
        # release references and create trash
        del a, wr_cycle
        gc.collect()
        # if called, it means there is a bug in the GC.  The weakref should be
        # cleared before Z dies.
        callback.assert_not_called()
        gc.enable()
class GCCallbackTests(unittest.TestCase):
    """Tests for gc.callbacks: callbacks fire at the start and stop of every
    collection with an info dict, and may clean up gc.garbage themselves."""
    def setUp(self):
        # Save gc state and disable it.
        self.enabled = gc.isenabled()
        gc.disable()
        self.debug = gc.get_debug()
        gc.set_debug(0)
        gc.callbacks.append(self.cb1)
        gc.callbacks.append(self.cb2)
        self.othergarbage = []
    def tearDown(self):
        # Restore gc state
        del self.visit
        gc.callbacks.remove(self.cb1)
        gc.callbacks.remove(self.cb2)
        gc.set_debug(self.debug)
        if self.enabled:
            gc.enable()
        # destroy any uncollectables
        gc.collect()
        for obj in gc.garbage:
            if isinstance(obj, Uncollectable):
                obj.partner = None
        del gc.garbage[:]
        del self.othergarbage
        gc.collect()
    def preclean(self):
        # Remove all fluff from the system.  Invoke this function
        # manually rather than through self.setUp() for maximum
        # safety.
        self.visit = []
        gc.collect()
        # Stash away any pre-existing garbage so it doesn't confuse the test.
        garbage, gc.garbage[:] = gc.garbage[:], []
        self.othergarbage.append(garbage)
        self.visit = []
    def cb1(self, phase, info):
        # Record every callback invocation as (callback-id, phase, info).
        self.visit.append((1, phase, dict(info)))
    def cb2(self, phase, info):
        self.visit.append((2, phase, dict(info)))
        if phase == "stop" and hasattr(self, "cleanup"):
            # Clean Uncollectable from garbage
            uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
            gc.garbage[:] = [e for e in gc.garbage
                             if not isinstance(e, Uncollectable)]
            for e in uc:
                e.partner = None
    def test_collect(self):
        """Each collection invokes every callback twice: once per phase."""
        self.preclean()
        gc.collect()
        # Algorithmically verify the contents of self.visit
        # because it is long and tortuous.
        # Count the number of visits to each callback
        n = [v[0] for v in self.visit]
        n1 = [i for i in n if i == 1]
        n2 = [i for i in n if i == 2]
        self.assertEqual(n1, [1]*2)
        self.assertEqual(n2, [2]*2)
        # Count that we got the right number of start and stop callbacks.
        n = [v[1] for v in self.visit]
        n1 = [i for i in n if i == "start"]
        n2 = [i for i in n if i == "stop"]
        self.assertEqual(n1, ["start"]*2)
        self.assertEqual(n2, ["stop"]*2)
        # Check that we got the right info dict for all callbacks
        for v in self.visit:
            info = v[2]
            self.assertTrue("generation" in info)
            self.assertTrue("collected" in info)
            self.assertTrue("uncollectable" in info)
    def test_collect_generation(self):
        """The info dict reports the generation being collected."""
        self.preclean()
        gc.collect(2)
        for v in self.visit:
            info = v[2]
            self.assertEqual(info["generation"], 2)
    @cpython_only
    def test_collect_garbage(self):
        """Uncollectable cycles are reported in the info dict and land in
        gc.garbage; a callback may then clean them up (see cb2)."""
        self.preclean()
        # Each of these cause four objects to be garbage: Two
        # Uncollectables and their instance dicts.
        Uncollectable()
        Uncollectable()
        C1055820(666)
        gc.collect()
        for v in self.visit:
            if v[1] != "stop":
                continue
            info = v[2]
            self.assertEqual(info["collected"], 2)
            self.assertEqual(info["uncollectable"], 8)
        # We should now have the Uncollectables in gc.garbage
        self.assertEqual(len(gc.garbage), 4)
        for e in gc.garbage:
            self.assertIsInstance(e, Uncollectable)
        # Now, let our callback handle the Uncollectable instances
        self.cleanup=True
        self.visit = []
        gc.garbage[:] = []
        gc.collect()
        for v in self.visit:
            if v[1] != "stop":
                continue
            info = v[2]
            self.assertEqual(info["collected"], 0)
            self.assertEqual(info["uncollectable"], 4)
        # Uncollectables should be gone
        self.assertEqual(len(gc.garbage), 0)
    @unittest.skipIf(BUILD_WITH_NDEBUG,
                     'built with -NDEBUG')
    def test_refcount_errors(self):
        """A broken (too-low) refcount must be detected by the collector's
        assertions, with a useful diagnostic on stderr."""
        self.preclean()
        # Verify the "handling" of objects with broken refcounts
        # Skip the test if ctypes is not available
        import_module("ctypes")
        import subprocess
        code = textwrap.dedent('''
            from test.support import gc_collect, SuppressCrashReport
            a = [1, 2, 3]
            b = [a]
            # Avoid coredump when Py_FatalError() calls abort()
            SuppressCrashReport().__enter__()
            # Simulate the refcount of "a" being too low (compared to the
            # references held on it by live data), but keeping it above zero
            # (to avoid deallocating it):
            import ctypes
            ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
            # The garbage collector should now have a fatal error
            # when it reaches the broken object
            gc_collect()
        ''')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        p.stdout.close()
        p.stderr.close()
        # Verify that stderr has a useful error message:
        self.assertRegex(stderr,
            br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
        self.assertRegex(stderr,
            br'refcount is too small')
        # "address : 0x7fb5062efc18"
        # "address : 7FB5062EFC18"
        address_regex = br'[0-9a-fA-Fx]+'
        self.assertRegex(stderr,
            br'object address  : ' + address_regex)
        self.assertRegex(stderr,
            br'object refcount : 1')
        self.assertRegex(stderr,
            br'object type     : ' + address_regex)
        self.assertRegex(stderr,
            br'object type name: list')
        self.assertRegex(stderr,
            br'object repr     : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
    """Tests (bug 1055820 variants c and d) that run with automatic
    collection enabled, letting gc trigger 'naturally' so that objects stay
    in their intended generations."""
    def setUp(self):
        gc.enable()
    def tearDown(self):
        gc.disable()
    def test_bug1055820c(self):
        """A weakref callback on an older-generation object must not observe
        younger cyclic trash that gc is clearing."""
        # Corresponds to temp2c.py in the bug report.  This is pretty
        # elaborate.
        c0 = C1055820(0)
        # Move c0 into generation 2.
        gc.collect()
        c1 = C1055820(1)
        c1.keep_c0_alive = c0
        del c0.loop # now only c1 keeps c0 alive
        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!
        ouch = []
        def callback(ignored):
            ouch[:] = [c2wr()]
        # The callback gets associated with a wr on an object in generation 2.
        c0wr = weakref.ref(c0, callback)
        c0 = c1 = c2 = None
        # What we've set up:  c0, c1, and c2 are all trash now.  c0 is in
        # generation 2.  The only thing keeping it alive is that c1 points to
        # it. c1 and c2 are in generation 0, and are in self-loops.  There's a
        # global weakref to c2 (c2wr), but that weakref has no callback.
        # There's also a global weakref to c0 (c0wr), and that does have a
        # callback, and that callback references c2 via c2wr().
        #
        #               c0 has a wr with callback, which references c2wr
        #               ^
        #               |
        #               |     Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        #               |     Generation 0 below dots
        #               |
        #               |
        #            ^->c1   ^->c2 has a wr but no callback
        #            |  |    |  |
        #            <--v    <--v
        #
        # So this is the nightmare:  when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref.  Collecting generation 0 doesn't see c0 at all, and c0 is
        # the only object that has a weakref with a callback.  gc clears c1
        # and c2.  Clearing c1 has the side effect of dropping the refcount on
        # c0 to 0, so c0 goes away (despite that it's in an older generation)
        # and c0's wr callback triggers.  That in turn materializes a reference
        # to c2 via c2wr(), but c2 gets cleared anyway by gc.
        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        junk = []
        i = 0
        detector = GC_Detector()
        while not detector.gc_happened:
            i += 1
            if i > 10000:
                self.fail("gc didn't happen after 10000 iterations")
            self.assertEqual(len(ouch), 0)
            junk.append([])  # this will eventually trigger gc
        self.assertEqual(len(ouch), 1)  # else the callback wasn't invoked
        for x in ouch:
            # If the callback resurrected c2, the instance would be damaged,
            # with an empty __dict__.
            self.assertEqual(x, None)
    def test_bug1055820d(self):
        """Same nightmare as test_bug1055820c, but the resurrection attempt
        comes from a __del__ method instead of a weakref callback."""
        # Corresponds to temp2d.py in the bug report.  This is very much like
        # test_bug1055820c, but uses a __del__ method instead of a weakref
        # callback to sneak in a resurrection of cyclic trash.
        ouch = []
        class D(C1055820):
            def __del__(self):
                ouch[:] = [c2wr()]
        d0 = D(0)
        # Move all the above into generation 2.
        gc.collect()
        c1 = C1055820(1)
        c1.keep_d0_alive = d0
        del d0.loop # now only c1 keeps d0 alive
        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!
        d0 = c1 = c2 = None
        # What we've set up:  d0, c1, and c2 are all trash now.  d0 is in
        # generation 2.  The only thing keeping it alive is that c1 points to
        # it.  c1 and c2 are in generation 0, and are in self-loops.  There's
        # a global weakref to c2 (c2wr), but that weakref has no callback.
        # There are no other weakrefs.
        #
        #               d0 has a __del__ method that references c2wr
        #               ^
        #               |
        #               |     Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        #               |     Generation 0 below dots
        #               |
        #               |
        #            ^->c1   ^->c2 has a wr but no callback
        #            |  |    |  |
        #            <--v    <--v
        #
        # So this is the nightmare:  when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref.  Collecting generation 0 doesn't see d0 at all.  gc clears
        # c1 and c2.  Clearing c1 has the side effect of dropping the refcount
        # on d0 to 0, so d0 goes away (despite that it's in an older
        # generation) and d0's __del__ triggers.  That in turn materializes
        # a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        detector = GC_Detector()
        junk = []
        i = 0
        while not detector.gc_happened:
            i += 1
            if i > 10000:
                self.fail("gc didn't happen after 10000 iterations")
            self.assertEqual(len(ouch), 0)
            junk.append([])  # this will eventually trigger gc
        self.assertEqual(len(ouch), 1)  # else __del__ wasn't invoked
        for x in ouch:
            # If __del__ resurrected c2, the instance would be damaged, with an
            # empty __dict__.
            self.assertEqual(x, None)
class PythonFinalizationTests(unittest.TestCase):
    """Regression tests exercising gc during interpreter finalization."""
    def test_ast_fini(self):
        """bpo-44184: subtype_dealloc() must not touch an AST type's memory
        after the instance (and possibly the type) has been freed during
        interpreter finalization."""
        # bpo-44184: Regression test for subtype_dealloc() when deallocating
        # an AST instance also destroy its AST type: subtype_dealloc() must
        # not access the type memory after deallocating the instance, since
        # the type memory can be freed as well. The test is also related to
        # _PyAST_Fini() which clears references to AST types.
        code = textwrap.dedent("""
            import ast
            import codecs
            # Small AST tree to keep their AST types alive
            tree = ast.parse("def f(x, y): return 2*x-y")
            x = [tree]
            x.append(x)
            # Put the cycle somewhere to survive until the last GC collection.
            # Codec search functions are only cleared at the end of
            # interpreter_clear().
            def search_func(encoding):
                return None
            search_func.a = x
            codecs.register(search_func)
        """)
        assert_python_ok("-c", code)
def test_main():
    """Run all gc test cases with automatic collection disabled and leak
    debugging off, restoring the caller's gc configuration afterwards."""
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
    try:
        gc.collect() # Delete 2nd generation garbage
        run_unittest(
            GCTests,
            GCCallbackTests,
            GCTogglingTests,
            PythonFinalizationTests)
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print("restoring automatic collection")
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
|
test_collection_count.py | import pdb
import pytest
import logging
import itertools
from time import sleep
import threading
from multiprocessing import Process
from milvus import IndexType, MetricType
from utils import *
# Shared test configuration for the collection-count tests.
dim = 128  # dimensionality of generated test vectors
index_file_size = 10  # collection index_file_size parameter (not referenced in the visible tests)
add_time_interval = 3  # seconds between inserts -- presumably used by tests later in the file
tag = "1970-01-01"  # default partition tag
nb = 6000  # default number of vectors per insert
class TestCollectionCount:
"""
params means different nb, the nb value may trigger merge, or not
"""
    # Per the class docstring, the different nb values may or may not
    # trigger a server-side segment merge.
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            20000,
        ],
    )
    def add_vectors_nb(self, request):
        """Parametrized fixture: number of vectors to insert per test."""
        yield request.param
"""
generate valid create_index params
"""
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        """Parametrized index-params fixture; skips combinations that the
        server does not support when running in CPU mode."""
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in cpu mode")
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("Skip PQ Temporary")
        return request.param
def test_collection_rows_count(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(collection_name=collection, records=vectors)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_partition(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partition and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_multi_partitions_A(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partitions and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status = connect.create_partition(collection, new_tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_multi_partitions_B(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partitions and add vectors in one of the partitions,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status = connect.create_partition(collection, new_tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb
def test_collection_rows_count_multi_partitions_C(self, connect, collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection, create partitions and add vectors in one of the partitions,
assert the value returned by count_collection method is equal to length of vectors
expected: the collection count is equal to the length of vectors
'''
new_tag = "new_tag"
nb = add_vectors_nb
vectors = gen_vectors(nb, dim)
status = connect.create_partition(collection, tag)
status = connect.create_partition(collection, new_tag)
assert status.OK()
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=new_tag)
connect.flush([collection])
status, res = connect.count_collection(collection)
assert res == nb * 2
def test_collection_rows_count_after_index_created(self, connect, collection, get_simple_index):
'''
target: test count_collection, after index have been created
method: add vectors in db, and create index, then calling count_collection with correct params
expected: count_collection raise exception
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
nb = 100
vectors = gen_vectors(nb, dim)
res = connect.add_vectors(collection_name=collection, records=vectors)
connect.flush([collection])
connect.create_index(collection, index_type, index_param)
status, res = connect.count_collection(collection)
assert res == nb
# @pytest.mark.level(2)
# def test_count_without_connection(self, collection, dis_connect):
# '''
# target: test count_collection, without connection
# method: calling count_collection with correct params, with a disconnected instance
# expected: count_collection raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.count_collection(collection)
def test_collection_rows_count_no_vectors(self, connect, collection):
'''
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_collection method is equal to 0
expected: the count is equal to 0
'''
collection_name = gen_unique_str()
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
connect.create_collection(param)
status, res = connect.count_collection(collection)
assert res == 0
# TODO: enable
@pytest.mark.level(2)
@pytest.mark.timeout(20)
def _test_collection_rows_count_multiprocessing(self, connect, collection, args):
'''
target: test collection rows_count is correct or not with multiprocess
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 2
vectors = gen_vectors(nq, dim)
res = connect.add_vectors(collection_name=collection, records=vectors)
time.sleep(add_time_interval)
def rows_count(milvus):
status, res = milvus.count_collection(collection)
logging.getLogger().info(status)
assert res == nq
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=rows_count, args=(milvus, ))
processes.append(p)
p.start()
logging.getLogger().info(p)
for p in processes:
p.join()
def test_collection_rows_count_multi_collections(self, connect):
'''
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str()
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
res = connect.add_vectors(collection_name=collection_name, records=vectors)
connect.flush(collection_list)
for i in range(20):
status, res = connect.count_collection(collection_list[i])
assert status.OK()
assert res == nq
class TestCollectionCountIP:
    """
    Tests for ``count_collection`` on inner-product (IP) collections.

    params means different nb, the nb value may trigger merge, or not
    """
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            20000,
        ],
    )
    def add_vectors_nb(self, request):
        # Parametrized insert sizes; the larger values may trigger a segment merge.
        yield request.param

    # generate valid create_index params
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        return request.param

    def test_collection_rows_count(self, connect, ip_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        connect.add_vectors(collection_name=ip_collection, records=vectors)
        connect.flush([ip_collection])
        status, res = connect.count_collection(ip_collection)
        assert status.OK()
        assert res == nb

    def test_collection_rows_count_after_index_created(self, connect, ip_collection, get_simple_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection returns the number of inserted vectors
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        nb = 100
        vectors = gen_vectors(nb, dim)
        connect.add_vectors(collection_name=ip_collection, records=vectors)
        connect.flush([ip_collection])
        connect.create_index(ip_collection, index_type, index_param)
        status, res = connect.count_collection(ip_collection)
        assert status.OK()
        assert res == nb

    # @pytest.mark.level(2)
    # def test_count_without_connection(self, ip_collection, dis_connect):
    #     '''
    #     target: test count_collection, without connection
    #     method: calling count_collection with correct params, with a disconnected instance
    #     expected: count_collection raise exception
    #     '''
    #     with pytest.raises(Exception) as e:
    #         status = dis_connect.count_collection(ip_collection)

    def test_collection_rows_count_no_vectors(self, connect, ip_collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by count_collection method is equal to 0
        expected: the count is equal to 0
        '''
        collection_name = gen_unique_str("test_collection")
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size}
        connect.create_collection(param)
        # NOTE(review): this counts the empty ``ip_collection`` fixture rather than
        # the collection created just above — presumably intentional, but confirm.
        status, res = connect.count_collection(ip_collection)
        assert status.OK()
        assert res == 0

    # TODO: enable
    @pytest.mark.timeout(60)
    def _test_collection_rows_count_multiprocessing(self, connect, ip_collection, args):
        '''
        target: test collection rows_count is correct or not with multiprocess
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 2
        vectors = gen_vectors(nq, dim)
        connect.add_vectors(collection_name=ip_collection, records=vectors)
        # Fix: the module only does ``from time import sleep`` — ``time.sleep`` here
        # would raise NameError once this (currently disabled) test is re-enabled.
        sleep(add_time_interval)

        def rows_count(milvus):
            status, res = milvus.count_collection(ip_collection)
            logging.getLogger().info(status)
            assert res == nq

        process_num = 8
        processes = []
        for i in range(process_num):
            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
            p = Process(target=rows_count, args=(milvus,))
            processes.append(p)
            p.start()
            logging.getLogger().info(p)
        for p in processes:
            p.join()

    def test_collection_rows_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of IP
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 100
        vectors = gen_vectors(nq, dim)
        collection_list = []
        for i in range(20):
            collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.IP}
            connect.create_collection(param)
            connect.add_vectors(collection_name=collection_name, records=vectors)
        # Flush all collections at once, then verify each count.
        connect.flush(collection_list)
        for i in range(20):
            status, res = connect.count_collection(collection_list[i])
            assert status.OK()
            assert res == nq
class TestCollectionCountJAC:
    """
    Row-count tests for binary collections using the JACCARD metric.

    params means different nb, the nb value may trigger merge, or not
    """
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            20000,
        ],
    )
    def add_vectors_nb(self, request):
        # Parametrized insert size shared by the row-count tests below.
        yield request.param
    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_jaccard_index(self, request, connect):
        # Only FLAT/IVFLAT index params are kept for this suite; everything else skips.
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")
    def test_collection_rows_count(self, connect, jac_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=jac_collection, records=vectors)
        connect.flush([jac_collection])
        status, res = connect.count_collection(jac_collection)
        assert res == nb
    def test_collection_rows_count_after_index_created(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection returns the number of inserted vectors
        '''
        nb = 100
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=jac_collection, records=vectors)
        connect.flush([jac_collection])
        connect.create_index(jac_collection, index_type, index_param)
        status, res = connect.count_collection(jac_collection)
        assert res == nb
    # @pytest.mark.level(2)
    # def test_count_without_connection(self, jac_collection, dis_connect):
    #     '''
    #     target: test count_collection, without connection
    #     method: calling count_collection with correct params, with a disconnected instance
    #     expected: count_collection raise exception
    #     '''
    #     with pytest.raises(Exception) as e:
    #         status = dis_connect.count_collection(jac_collection)
    def test_collection_rows_count_no_vectors(self, connect, jac_collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by count_collection method is equal to 0
        expected: the count is equal to 0
        '''
        collection_name = gen_unique_str("test_collection")
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size}
        connect.create_collection(param)
        # NOTE(review): this counts the empty ``jac_collection`` fixture rather than
        # the collection created just above — presumably intentional, but confirm.
        status, res = connect.count_collection(jac_collection)
        assert res == 0
    def test_collection_rows_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of JACCARD
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 100
        tmp, vectors = gen_binary_vectors(nq, dim)
        collection_list = []
        for i in range(20):
            collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.JACCARD}
            connect.create_collection(param)
            res = connect.add_vectors(collection_name=collection_name, records=vectors)
        # Flush all collections at once, then verify each count.
        connect.flush(collection_list)
        for i in range(20):
            status, res = connect.count_collection(collection_list[i])
            assert status.OK()
            assert res == nq
class TestCollectionCountBinary:
    """
    Row-count tests for binary collections (HAMMING / SUBSTRUCTURE / SUPERSTRUCTURE).

    params means different nb, the nb value may trigger merge, or not
    """
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            20000,
        ],
    )
    def add_vectors_nb(self, request):
        # Parametrized insert size shared by the row-count tests below.
        yield request.param
    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_hamming_index(self, request, connect):
        # Only FLAT/IVFLAT index params are kept for hamming; everything else skips.
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_substructure_index(self, request, connect):
        # Only the FLAT index param is kept for substructure; everything else skips.
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_superstructure_index(self, request, connect):
        # Only the FLAT index param is kept for superstructure; everything else skips.
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")
    def test_collection_rows_count(self, connect, ham_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=ham_collection, records=vectors)
        connect.flush([ham_collection])
        status, res = connect.count_collection(ham_collection)
        assert res == nb
    def test_collection_rows_count_substructure(self, connect, substructure_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=substructure_collection, records=vectors)
        connect.flush([substructure_collection])
        status, res = connect.count_collection(substructure_collection)
        assert res == nb
    def test_collection_rows_count_superstructure(self, connect, superstructure_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=superstructure_collection, records=vectors)
        connect.flush([superstructure_collection])
        status, res = connect.count_collection(superstructure_collection)
        assert res == nb
    def test_collection_rows_count_after_index_created(self, connect, ham_collection, get_hamming_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection returns the number of inserted vectors
        '''
        nb = 100
        index_type = get_hamming_index["index_type"]
        index_param = get_hamming_index["index_param"]
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=ham_collection, records=vectors)
        connect.flush([ham_collection])
        connect.create_index(ham_collection, index_type, index_param)
        status, res = connect.count_collection(ham_collection)
        assert res == nb
    def test_collection_rows_count_after_index_created_substructure(self, connect, substructure_collection, get_substructure_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection returns the number of inserted vectors
        '''
        nb = 100
        index_type = get_substructure_index["index_type"]
        index_param = get_substructure_index["index_param"]
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=substructure_collection, records=vectors)
        connect.flush([substructure_collection])
        connect.create_index(substructure_collection, index_type, index_param)
        status, res = connect.count_collection(substructure_collection)
        assert res == nb
    def test_collection_rows_count_after_index_created_superstructure(self, connect, superstructure_collection, get_superstructure_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection returns the number of inserted vectors
        '''
        nb = 100
        index_type = get_superstructure_index["index_type"]
        index_param = get_superstructure_index["index_param"]
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=superstructure_collection, records=vectors)
        connect.flush([superstructure_collection])
        connect.create_index(superstructure_collection, index_type, index_param)
        status, res = connect.count_collection(superstructure_collection)
        assert res == nb
    # @pytest.mark.level(2)
    # def test_count_without_connection(self, ham_collection, dis_connect):
    #     '''
    #     target: test count_collection, without connection
    #     method: calling count_collection with correct params, with a disconnected instance
    #     expected: count_collection raise exception
    #     '''
    #     with pytest.raises(Exception) as e:
    #         status = dis_connect.count_collection(ham_collection)
    def test_collection_rows_count_no_vectors(self, connect, ham_collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by count_collection method is equal to 0
        expected: the count is equal to 0
        '''
        collection_name = gen_unique_str("test_collection")
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size}
        connect.create_collection(param)
        # NOTE(review): this counts the empty ``ham_collection`` fixture rather than
        # the collection created just above — presumably intentional, but confirm.
        status, res = connect.count_collection(ham_collection)
        assert res == 0
    def test_collection_rows_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of HAMMING
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 100
        tmp, vectors = gen_binary_vectors(nq, dim)
        collection_list = []
        for i in range(20):
            collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.HAMMING}
            connect.create_collection(param)
            res = connect.add_vectors(collection_name=collection_name, records=vectors)
        # Flush all collections at once, then verify each count.
        connect.flush(collection_list)
        for i in range(20):
            status, res = connect.count_collection(collection_list[i])
            assert status.OK()
            assert res == nq
class TestCollectionCountTANIMOTO:
    """
    Row-count tests for binary collections using the TANIMOTO metric.

    params means different nb, the nb value may trigger merge, or not
    """

    @pytest.fixture(
        scope="function",
        params=[1, 5000, 20000],
    )
    def add_vectors_nb(self, request):
        # Parametrized insert size; larger values may trigger a segment merge.
        yield request.param

    # generate valid create_index params
    @pytest.fixture(scope="function", params=gen_simple_index())
    def get_tanimoto_index(self, request, connect):
        logging.getLogger().info(request.param)
        # Guard clause: anything other than FLAT/IVFLAT is skipped for this suite.
        if request.param["index_type"] not in (IndexType.IVFLAT, IndexType.FLAT):
            pytest.skip("Skip index Temporary")
        return request.param

    def test_collection_rows_count(self, connect, tanimoto_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        expected_count = add_vectors_nb
        _, records = gen_binary_vectors(expected_count, dim)
        connect.add_vectors(collection_name=tanimoto_collection, records=records)
        connect.flush([tanimoto_collection])
        status, row_count = connect.count_collection(tanimoto_collection)
        assert status.OK()
        assert row_count == expected_count
|
pysurge.py | import logging
import multiprocessing
import os
import queue
import signal
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from pydoc import locate
log = logging.getLogger(__name__)
class TestCase:
    """
    Base class for a test case that is run by the perf tester

    Subclasses are expected to implement every method/property below.
    """

    @classmethod
    def startup(cls):
        """Defines session-level startup work shared by all tests of this type.

        Anything stored here becomes a class attr, and is therefore visible to
        every instance TestRunner kicks off. TestRunner invokes this before
        starting load, and runs it only ONCE per test class in each child
        process — even when several instances of the same test (with
        different kwargs) are configured.
        """
        raise NotImplementedError

    @classmethod
    def shutdown(cls):
        """Defines session-level shutdown work shared by all tests of this type.

        TestRunner invokes this after the load runs stop.
        """
        raise NotImplementedError

    def __init__(self, **kwargs):
        """Capture the per-test "configuration" kwargs.

        Not to be confused with self.setup() below: __init__ only records the
        parameters/flags (configured in the perf test YAML) that shape this
        particular test case.
        """
        super().__init__()
        self.kwargs = kwargs
        # Arbitrary int/float measurements a TestCase may record per run.
        # Values are averaged at the end for reporting, and a metric only
        # counts towards the average when the test *passes*.
        self.metrics = {}

    def __str__(self):
        return "<{} kwargs={}>".format(type(self).__name__, self.kwargs)

    @property
    def summary(self):
        """Return a short summary of what this test does (for reporting purposes)."""
        raise NotImplementedError

    @property
    def description(self):
        """Returns a description for this specific test case (string).

        TestRunner uses this for logging, so it should depend only on attrs
        defined in self.__init__, self.startup, and self.shutdown.
        """
        raise NotImplementedError

    @property
    def max_duration(self):
        """Returns the maximum time in sec that an instance of this test may take to run"""
        raise NotImplementedError

    def setup(self):
        """Defines setup for a single execution of this test."""
        raise NotImplementedError

    def teardown(self):
        """Defines teardown for a single execution of this test."""
        raise NotImplementedError

    def run(self):
        """Defines the procedure to actually run the test.

        Any Exception raised in here will be marked as a test failure by TestRunner.
        """
        raise NotImplementedError
class TestRunner:
    """
    Runs a TestCase at a specific rate in a thread pool and keeps track of successes/failures

    Args:
        test_cls: The TestCase test to run
        test_cls_kwargs (dict): kwargs used to instantiate every instance of test_cls
        rate (float): The rate to run test_cls at, in # of tests/per sec
        debug (bool): when True, re-raise test exceptions instead of only counting them
    """
    def __init__(self, test_cls, test_cls_kwargs, rate, debug=False):
        self._stop = threading.Event()
        self._stop.set()  # starts "stopped"; run() clears this
        self._lock = threading.Lock()  # guards counters/metrics shared across worker threads
        self.test_cls = test_cls
        self.test_cls_kwargs = test_cls_kwargs
        # Canonical instance used for description/summary/max_duration lookups
        self.test_instance = test_cls(**test_cls_kwargs)
        self.rate = rate  # num tests per sec
        self.active_tests = 0
        self.successes = 0
        self.failures = 0
        self.test_run_time = 0
        self.metrics = {}
        self.debug = debug

    @property
    def running(self):
        """True while tests are still being submitted or any test is in flight."""
        return not self._stop.is_set() or self.active_tests > 0

    def _test_runner(self):
        """Runs a single test in a thread, stores results."""
        with self._lock:
            self.active_tests += 1
        # Bind 'test' up front: if instantiation below raises, the except/finally
        # blocks must not hit an UnboundLocalError (bug in the original version).
        test = None
        try:
            test = self.test_cls(**self.test_cls_kwargs)
            log.debug("test '%s' in setup()", test.description)
            test.setup()
            log.debug("test '%s' in run()", test.description)
            test.run()
            with self._lock:
                for metric, value in test.metrics.items():
                    if metric not in self.metrics:
                        self.metrics[metric] = []
                    self.metrics[metric].append(value)
                    log.debug(
                        "test '%s' storing metric '%s' value '%f'", test.description, metric, value
                    )
                self.successes += 1
            log.debug("test '%s' passed", test.description)
        except Exception as exc:
            desc = test.description if test is not None else str(self.test_cls)
            log.error("test '%s' hit error: %s", desc, str(exc))
            with self._lock:
                self.failures += 1
            if self.debug:
                raise
        finally:
            try:
                # Teardown only applies when the instance was actually created
                if test is not None:
                    log.debug("test '%s' in teardown()", test.description)
                    test.teardown()
            except Exception as exc:
                log.exception("test '%s' hit error in teardown: %s", test.description, str(exc))
            finally:
                with self._lock:
                    self.active_tests -= 1

    def stop(self):
        """Trigger tests to stop and finish up."""
        if self._stop.is_set():
            return
        log.info(
            "TestRunner stopping for %s... %d active threads need to finish",
            str(self.test_cls),
            self.active_tests,
        )
        self._stop.set()

    def _thread_pool_submitter(self):
        """Handles the continuous loop to submit tests to the thread pool executor."""
        import math  # local import keeps this fix self-contained in the block

        start_time = time.time()
        max_duration = self.test_instance.max_duration
        # Based on Little's Law
        # Max number of concurrent tests = (num tests fired/sec) * (max duration in sec of test)
        # Round up to a positive int: ThreadPoolExecutor expects an integral worker
        # count, and a fractional product (e.g. rate=0.5, duration=1.5) must still
        # get at least one worker.
        workers = max(1, math.ceil(self.rate * max_duration))
        executor = ThreadPoolExecutor(max_workers=workers)
        rate_of_fire = 1.0 / self.rate  # 1 test fired every 'rate_of_fire' seconds
        while not self._stop.is_set():
            try:
                executor.submit(self._test_runner)
            except RuntimeError as err:
                if "can't start new thread" in str(err):
                    log.error("unable to start new thread! desired rate won't be achieved")
                else:
                    raise
            time.sleep(rate_of_fire)
        # Blocks until all in-flight tests finish before recording elapsed time
        executor.shutdown()
        stop_time = time.time()
        self.test_run_time = stop_time - start_time

    def run(self):
        """
        Start running tests at the defined rate.

        Starts a thread that takes care of submitting tests to a thread pool, so
        this is a non-blocking call.
        """
        self.successes = 0
        self.failures = 0
        self._stop.clear()
        t = threading.Thread(target=self._thread_pool_submitter)
        t.daemon = True
        t.start()
class ChildProcess(multiprocessing.Process):
"""
A child process which runs multiple TestRunner instances.
One ChildProcess runs per CPU on the machine executing load tests. The total amount of tests
running for each test case is spread across all the ChildProcesses by adjusting their rate:
child process rate = total rate / # processors
"""
@staticmethod
def _test_cls_shutdown(test_cls):
"""Run shutdown for a test_cls and log any exception."""
try:
log.debug("test class %s calling shutdown()", test_cls.__name__)
test_cls.shutdown()
return True
except Exception:
log.exception("Warning: shutdown for %s failed.", str(test_cls))
return False
@staticmethod
def _test_cls_startup(test_cls):
"""Run the startup for a test_cls and log any exception."""
try:
log.debug("test class %s calling startup()", test_cls.__name__)
test_cls.startup()
return True
except Exception:
log.exception("Startup for %s failed! Not running this test.", str(test_cls))
return False
def __init__(
self, config, run_start_delay, start_event, stop_event, result_queue, proc_num, debug=False
):
self.config = config
self.proc_run_start_delay = run_start_delay
self.start_event = start_event
self.stop_event = stop_event
self.result_queue = result_queue
self.perf_testers = []
self.processors = multiprocessing.cpu_count()
self.startup_attempted = []
self.startup_successful = []
self.proc_num = proc_num
self.debug = debug
super().__init__()
def _stop_and_send_results(self):
"""
Stop all perf testers and wait for them to complete
Results are then pushed onto the result queue.
TODO: in future results may be pushed concurrently in real time to a 'reporter'
"""
for perf_tester in self.perf_testers:
perf_tester.stop()
while any([pt.running for pt in self.perf_testers]):
time.sleep(1)
for test_cls in self.startup_attempted:
self._test_cls_shutdown(test_cls)
results = []
for pt in self.perf_testers:
# Instantiate the test class w/ kwargs so we can get the unique name/summary properties
test = pt.test_instance
test_name = f"{str(test)} -- {test.summary}"
results.append(
{
"test_name": test_name,
"successes": pt.successes,
"failures": pt.failures,
"metrics": pt.metrics,
}
)
self.result_queue.put(results)
def _init_test_runners(self):
# Run startup once per test class
for test_config in self.config["tests"]:
test_cls = test_config["test_class"]
if test_cls not in self.startup_attempted:
if self._test_cls_startup(test_cls):
self.startup_successful.append(test_cls)
self.startup_attempted.append(test_cls)
total_rate_this_proc = 0.0
# Run only the test classes that passed startup successfully
for test_config in self.config["tests"]:
rate_per_proc = test_config["rate"] / self.processors
test_cls = test_config["test_class"]
test_kwargs = test_config.get("kwargs", {})
if test_cls in self.startup_successful:
pt = TestRunner(test_cls, test_kwargs, rate_per_proc, self.debug)
self.perf_testers.append(pt)
total_rate_this_proc += pt.rate
return total_rate_this_proc
def run(self):
    """
    The logic running within each child process.
    Takes care of:
    1. running session startup for each test case class
    2. launching a TestRunner for that TestCase
    3. initiating the start of the TestRunners (only if their startup worked)
    4. trigger stop of the TestRunners
    5. running session shutdown for each test case class
    6. sending results back to the main process on result_queue
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)  # worker procs ignore sigint
    try:
        total_rate_this_proc = self._init_test_runners()
    except Exception:
        # Any startup failure is reported to the main process, which treats
        # a "STARTUP_FAILED" status as fatal for the whole run.
        log.exception("Child process hit error during startup")
        self.result_queue.put("STARTUP_FAILED")
        return
    log.info("Sending 'startup done' to main proc and waiting for it to tell me to start...")
    self.result_queue.put("STARTUP_DONE")
    self.start_event.wait()
    # Wait for instruction from the main proc to start firing tests
    if len(self.startup_successful) < 1:
        log.error(
            "No tests to run on this child process... startup likely failed on all of them"
        )
    else:
        log.info("Sleeping for proc start delay: %f", self.proc_run_start_delay)
        time.sleep(self.proc_run_start_delay)
        log.info("Done sleeping")
        # Stagger the start of the perf testers within this proc to more evenly space test fires
        # Also shuffle the order in which they start
        if self.proc_num % 2:
            perf_testers = reversed(self.perf_testers)
        else:
            perf_testers = self.perf_testers
        for count, pt in enumerate(perf_testers):
            # Space runner starts evenly across one aggregate fire interval.
            delay = (1 / total_rate_this_proc) * count
            time.sleep(delay)
            pt.run()
    # Wait until trigger to stop is received.
    # NOTE(review): placed at method level (not inside the else) so the main
    # process always receives one result list per child, even when every
    # startup failed -- confirm against the original indentation.
    self.stop_event.wait()
    self._stop_and_send_results()
class Manager:
    """Handles running multiple TestRunners, splitting them across ChildProcesses."""

    @staticmethod
    def _validate_test_cls(test_cls, test_cls_kwargs):
        # Instantiate once and touch max_duration so a subclass that left the
        # property unimplemented fails here, before any process is started.
        instance = test_cls(**test_cls_kwargs)
        try:
            instance.max_duration
        except NotImplementedError:
            raise AttributeError(
                f"{type(instance)} must have max_duration property defined"
            ) from None  # avoid printing the original chained error

    def __init__(self, config, duration, debug=False):
        """Validate configuration, import test classes, and prepare IPC objects.

        :param config: dict with a "tests" list; each entry carries a
            "test_class" dotted-path string plus a "rate" and optional "kwargs".
        :param duration: run length in seconds; falsy means run until ctrl+C.
        :param debug: forwarded to every ChildProcess.
        :raises ValueError: when a configured test class cannot be imported.
        """
        # TODO: real config management
        self.config = config
        self.processors = multiprocessing.cpu_count()  # integer value
        self.result_queue = multiprocessing.Queue()
        self.start_event = multiprocessing.Event()
        self.stop_event = multiprocessing.Event()
        self.start_time = 0.0
        self.stop_time = 0.0
        self.duration = duration
        self.debug = debug
        # Append cwd to sys.path so we can load adhoc test classes
        sys.path.insert(0, os.getcwd())
        # Map the test class string in the config into an actual imported class
        log.info("Validating test classes...")
        for test in self.config["tests"]:
            test_cls = test["test_class"]
            test_cls_kwargs = test.get("kwargs", {})
            imported_cls = locate(test_cls)
            if not imported_cls:
                raise ValueError(f"Unable to import test class '{test_cls}'")
            test["test_class"] = imported_cls
            self._validate_test_cls(imported_cls, test_cls_kwargs)
        # Remove freshly added cwd from path
        sys.path.pop(0)

    def _aggregate_results(self, results):
        """Take the results returned by each proc and aggregate."""
        aggregated = {}
        # Aggregate all the values
        for result_list in results:
            for result in result_list:
                test_name = result["test_name"]
                successes = result["successes"]
                failures = result["failures"]
                metrics = result["metrics"]
                if test_name not in aggregated:
                    aggregated[test_name] = {
                        "successes": 0,
                        "failures": 0,
                        "metrics": {},
                    }
                # Add up successes/failures
                aggregated[test_name]["successes"] += successes
                aggregated[test_name]["failures"] += failures
                # Aggregate metric values
                aggregated_metrics = aggregated[test_name]["metrics"]
                # Metric lists are merged only from procs that had successes;
                # all-failure procs contribute nothing to the averages.
                if successes > 0:
                    for metric, values in metrics.items():
                        if metric not in aggregated_metrics:
                            aggregated_metrics[metric] = values
                        else:
                            aggregated_metrics[metric] += values
        # Print aggregated results (and calc avg for each metric)
        for test_name, result in aggregated.items():
            successes = result["successes"]
            failures = result["failures"]
            metrics = result["metrics"]
            log.info("---- Results for test '%s' ----", test_name)
            log.info(
                ">>> Ran for %f sec, fired %d total tests",
                self.stop_time - self.start_time,
                successes + failures,
            )
            log.info(">>> Successes: %d, failures: %d", successes, failures)
            log.info("---- Metrics for test '%s' ----", test_name)
            for metric, values in metrics.items():
                log.info(">>> %s: %f", metric, sum(values) / len(values))
            log.info("\n\n")

    def stop(self):
        # Signals every child process to wind down and records the stop time
        # used in the aggregated report.
        log.info("STOPPING -- Telling child processes to begin shutting down...")
        self.stop_event.set()
        self.stop_time = time.time()

    def _signal_handler(self, signal, frame):
        # NOTE(review): the `signal` parameter shadows the `signal` module;
        # harmless here because the module is not referenced in this body.
        log.info("CTRL+C HIT -- stopping")
        self.stop()

    def start(self):
        """
        Start running the perf tests.
        They will run until the load run duration is reached, or until ctrl+C is pressed
        """
        # ctrl+C signal should set the stop event to trigger child procs to stop
        signal.signal(signal.SIGINT, self._signal_handler)
        self.child_procs = []
        self.start_event.clear()
        self.stop_event.clear()
        total_rate = sum([test_config["rate"] for test_config in self.config["tests"]])
        for count in range(0, self.processors):
            # stagger the start of each proc so they fire tests evenly spaced
            delay = (1 / total_rate) * count
            proc = ChildProcess(
                self.config,
                delay,
                self.start_event,
                self.stop_event,
                self.result_queue,
                count,
                self.debug,
            )
            proc.start()
            self.child_procs.append(proc)
        # wait for the 'startup' to finish
        results = []
        log.info("Waiting to receive 'startup done' from child procs...")
        failed = False
        while len(results) < len(self.child_procs):
            try:
                result = self.result_queue.get(block=False)
                if result == "STARTUP_FAILED":
                    log.debug("Startup for a child process failed")
                    failed = True
                elif result != "STARTUP_DONE":
                    log.debug("Expected a STARTUP_DONE result, got something else")
                    failed = True
                results.append(result)
            except queue.Empty:
                time.sleep(0.1)
        if failed:
            raise Exception("Startup of child processes failed")
        # Now trigger the actual run
        log.info("Starting...")
        self.start_time = time.time()
        self.start_event.set()
        if self.duration:
            time_to_stop = self.start_time + self.duration
        else:
            time_to_stop = None  # no duration -> run until ctrl+C
        # Wait for the results from each proc to come in
        results = []
        while len(results) < len(self.child_procs):
            # Trigger the stop exactly once when the deadline passes.
            if time_to_stop and time.time() >= time_to_stop and not self.stop_event.is_set():
                self.stop()
            try:
                results.append(self.result_queue.get(block=False))
            except queue.Empty:
                time.sleep(0.1)
        if self.stop_time == 0.0:
            # There was some error and tests weren't stopped cleanly...
            self.stop_time = time.time()
        self._aggregate_results(results)
|
test_ca.py |
# for standalone-test
import sys
sys.path.append(".")
import unittest
import time
import threading
try:
# Python27
import Queue as queue
except ImportError:
# Python35
import queue
import j1939
class TestCA(unittest.TestCase):
    """Tests for the j1939 ControllerApplication address-claim procedures.

    Expected bus traffic is scripted in self.can_messages as tuples of
    (MsgType, can_id, data_bytes, timestamp). CANTX entries are asserted
    against what the ECU actually sends; CANRX entries are injected into
    the ECU through a feeder thread. A timestamp of 0.0 means "use now".
    """
    # TODO: should we change the async_can_feeder to use the can backend with
    # bustype 'virtual' instead of injecting our messages directly?

    class MsgType(object):
        # Direction/kind of a scripted message
        CANRX = 0  # message to inject into the ECU
        CANTX = 1  # message the ECU is expected to transmit
        PDU = 2    # decoded PDU expected via _on_message

    def _async_can_feeder(self):
        """Asynchronous feeder"""
        # Runs in a worker thread: forwards queued RX messages to the ECU
        # until the STOP_THREAD sentinel arrives.
        while True:
            message = self.message_queue.get(block=True)
            if message is self.STOP_THREAD:
                break
            recv_time = message[3]
            if recv_time == 0.0:
                # 0.0 in the script means "stamp with the current time"
                recv_time = time.time()
            self.ecu.notify(message[1], message[2], recv_time)

    def _inject_messages_into_ecu(self):
        # Push every leading CANRX entry of the script into the feeder queue.
        while self.can_messages and self.can_messages[0][0] == TestCA.MsgType.CANRX:
            message = self.can_messages.pop(0)
            self.message_queue.put(message)

    def _send_message(self, can_id, data):
        """Will be used instead of the usual ecu.send_message method.
        Checks the message sent and generates the appropriate answer.
        The data is fed from self.can_messages.
        """
        expected_data = self.can_messages.pop(0)
        self.assertEqual(expected_data[0], TestCA.MsgType.CANTX, "No transmission was expected")
        self.assertEqual(can_id, expected_data[1])
        # Scripted data bytes are stored reversed relative to the wire order.
        self.assertSequenceEqual(data[::-1], expected_data[2])
        self._inject_messages_into_ecu()

    def _on_message(self, pgn, data):
        """Feed incoming message to this testcase.

        :param int pgn:
            Parameter Group Number of the message
        :param bytearray data:
            Data of the PDU
        """
        expected_data = self.pdus.pop(0)
        self.assertEqual(expected_data[0], TestCA.MsgType.PDU)
        self.assertEqual(pgn, expected_data[1])
        self.assertSequenceEqual(data[::-1], expected_data[2])

    def setUp(self):
        """Called before each test methode.
        Method called to prepare the test fixture. This is called immediately
        before calling the test method; other than AssertionError or SkipTest,
        any exception raised by this method will be considered an error rather
        than a test failure. The default implementation does nothing.
        """
        self.can_messages = []
        self.pdus = []
        self.STOP_THREAD = object()  # sentinel that terminates the feeder thread
        self.message_queue = queue.Queue()
        self.message_thread = threading.Thread(target=self._async_can_feeder)
        self.message_thread.start()
        self.ecu = j1939.ElectronicControlUnit()
        # redirect the send_message from the can bus to our simulation
        self.ecu.send_message = self._send_message

    def tearDown(self):
        """Called after each test methode.
        Method called immediately after the test method has been called and
        the result recorded. This is called even if the test method raised an
        exception, so the implementation in subclasses may need to be
        particularly careful about checking internal state. Any exception,
        other than AssertionError or SkipTest, raised by this method will be
        considered an additional error rather than a test failure (thus
        increasing the total number of reported errors). This method will only
        be called if the setUp() succeeds, regardless of the outcome of the
        test method. The default implementation does nothing.
        """
        self.ecu.stop()
        self.message_queue.put(self.STOP_THREAD)
        self.message_thread.join()

    def test_addr_claim_fixed(self):
        """Test CA Address claim on the bus with fixed address

        This test runs a "Single Address Capable" claim procedure with a fixed
        address of 128.
        """
        self.can_messages = [
            (TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0),  # Address Claimed
        ]
        name = j1939.Name(
            arbitrary_address_capable=0,
            industry_group=j1939.Name.IndustryGroup.Industrial,
            vehicle_system_instance=2,
            vehicle_system=155,
            function=201,
            function_instance=16,
            ecu_instance=2,
            manufacturer_code=666,
            identity_number=1234567
        )
        # create new CA on the bus with given NAME and ADDRESS
        new_ca = self.ecu.add_ca(name=name, device_address=128)
        # by starting the CA it announces the given ADDRESS on the bus
        new_ca.start()
        # wait until all messages are processed asynchronously
        # NOTE(review): loops forever if the expected TX never happens;
        # relies on the unittest runner being interrupted manually.
        while len(self.can_messages) > 0:
            time.sleep(0.500)
        # wait for final processing
        time.sleep(0.500)
        self.assertEqual(new_ca.state, j1939.ControllerApplication.State.NORMAL)

    def test_addr_claim_fixed_veto_lose(self):
        """Test CA Address claim on the bus with fixed address and a veto counterpart

        This test runs a "Single Address Capable" claim procedure with a fixed
        address of 128. A counterpart on the bus declines the address claimed message
        with a veto and we lose our address.
        """
        self.can_messages = [
            (TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0),  # Address Claimed
            (TestCA.MsgType.CANRX, 0x18EEFF80, [83, 54, 111, 130, 83, 82, 214, 135], 0.0),  # Veto from Counterpart with lower name
            (TestCA.MsgType.CANTX, 0x18EEFFFE, [83, 54, 201, 130, 83, 82, 214, 135], 0.0),  # CANNOT CLAIM
        ]
        name = j1939.Name(
            arbitrary_address_capable=0,
            industry_group=j1939.Name.IndustryGroup.Industrial,
            vehicle_system_instance=2,
            vehicle_system=155,
            function=201,
            function_instance=16,
            ecu_instance=2,
            manufacturer_code=666,
            identity_number=1234567
        )
        # create new CA on the bus with given NAME and ADDRESS
        new_ca = self.ecu.add_ca(name=name, device_address=128)
        # by starting the CA it announces the given ADDRESS on the bus
        new_ca.start()
        # wait until all messages are processed asynchronously
        while len(self.can_messages) > 0:
            time.sleep(0.500)
        # wait for final processing
        time.sleep(0.500)
        self.assertEqual(new_ca.state, j1939.ControllerApplication.State.CANNOT_CLAIM)

    def test_addr_claim_fixed_veto_win(self):
        """Test CA Address claim on the bus with fixed address and a veto counterpart

        This test runs a "Single Address Capable" claim procedure with a fixed
        address of 128. A counterpart on the bus declines the address claimed message
        with a veto, but our name is less.
        """
        self.can_messages = [
            (TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0),  # Address Claimed
            (TestCA.MsgType.CANRX, 0x18EEFF80, [83, 54, 222, 130, 83, 82, 214, 135], 0.0),  # Veto from Counterpart with higher name
            (TestCA.MsgType.CANTX, 0x18EEFF80, [83, 54, 201, 130, 83, 82, 214, 135], 0.0),  # resend Address Claimed
        ]
        name = j1939.Name(
            arbitrary_address_capable=0,
            industry_group=j1939.Name.IndustryGroup.Industrial,
            vehicle_system_instance=2,
            vehicle_system=155,
            function=201,
            function_instance=16,
            ecu_instance=2,
            manufacturer_code=666,
            identity_number=1234567
        )
        # create new CA on the bus with given NAME and ADDRESS
        new_ca = self.ecu.add_ca(name=name, device_address=128)
        # by starting the CA it announces the given ADDRESS on the bus
        new_ca.start()
        # wait until all messages are processed asynchronously
        while len(self.can_messages) > 0:
            time.sleep(0.500)
        # wait for final processing
        time.sleep(0.500)
        self.assertEqual(new_ca.state, j1939.ControllerApplication.State.NORMAL)

    def test_addr_claim_arbitrary_veto_lose(self):
        """Test CA Address claim on the bus with arbitrary capability a veto counterpart

        This test runs a "Arbitrary Address Capable" claim procedure with an
        address of 128. A counterpart on the bus declines the address claimed message
        with a veto and we lose our address. Our device should claim the next address
        (129) automatically.
        """
        self.can_messages = [
            (TestCA.MsgType.CANTX, 0x18EEFF80, [211, 54, 201, 130, 83, 82, 214, 135], 0.0),  # Address Claimed 128
            (TestCA.MsgType.CANRX, 0x18EEFF80, [83, 54, 111, 130, 83, 82, 214, 135], 0.0),  # Veto from Counterpart with lower name
            (TestCA.MsgType.CANTX, 0x18EEFF81, [211, 54, 201, 130, 83, 82, 214, 135], 0.0),  # Address Claimed 129
        ]
        name = j1939.Name(
            arbitrary_address_capable=1,
            industry_group=j1939.Name.IndustryGroup.Industrial,
            vehicle_system_instance=2,
            vehicle_system=155,
            function=201,
            function_instance=16,
            ecu_instance=2,
            manufacturer_code=666,
            identity_number=1234567
        )
        # create new CA on the bus with given NAME and ADDRESS
        new_ca = self.ecu.add_ca(name=name, device_address=128)
        # by starting the CA it announces the given ADDRESS on the bus
        new_ca.start()
        # wait until all messages are processed asynchronously
        while len(self.can_messages) > 0:
            time.sleep(0.500)
        # wait for final processing
        time.sleep(0.500)
        self.assertEqual(new_ca.state, j1939.ControllerApplication.State.NORMAL)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
test_big_privacy.py | #!/usr/bin/python
from cloudlight import BigGraph
from cloudlight import PrivacyAttackStrategies
from cloudlight.utils.itertools_recipes import izip
from cloudlight.utils.estimator import TimeEstimator
import time, sys, os, random
from multiprocessing import Process, Queue
# NOTE: this script is Python 2 (print statements below).
cache_size_pages = 2**16  # BigGraph page-cache size, in pages

# Positional CLI arguments, each with a fallback default. The commented-out
# print/exit pairs show the originally intended strict behaviour.
filename = len(sys.argv) > 1 and sys.argv[1] or None
if not filename:
    filename = '/tesis/lj-10k-test.big_graph'
    #print 'Error: first argument missing, input filename with BigGraph archive!'
    #exit(-1)

outname = len(sys.argv) > 2 and sys.argv[2] or None
if not outname:
    outname = '/tesis/lj-test.privacy'
    #print 'Error: second argument missing, output filename!'
    #exit(-1)

# `type` selects the family of attack strategies (shadows the builtin).
type = len(sys.argv) > 3 and sys.argv[3] or None
if not type:
    type = 'no_triangles'
    #print 'INFO: third argument missing, attack type (passive, supernode, triangles or no_degree), assuming passive!'
    #type = 'all' #'test' #'all'

coverage = len(sys.argv) > 4 and sys.argv[4] or None
if not coverage: # or not coverage in ['node', 'link','korolova', 'triangle']:
    #coverage = 'link'
    print 'INFO: fourth argument missing, coverage type ("complete node" or "link"), assuming "complete node"!'
    coverage_funcs = ['node'] #,'link','triangle','korolova']
else:
    coverage_funcs = coverage.split(',')

end_cov = len(sys.argv) > 5 and sys.argv[5] or None
if not end_cov :
    #coverage = 'link'
    print 'INFO: fifth argument missing, coverage end (from 5 to 100), assuming 70!'
    end_cov = 40
else:
    end_cov = int(end_cov)

#coverage_type = coverage

debug = True
lookaheads = [0] #,1,2] #,3]
begin_cov = 5
#end_cov = 100
# Coverage targets as fractions, in 5% steps from begin_cov up to end_cov.
coverages = [float(i)/100 for i in range(begin_cov,end_cov+5,5)]

# Map the requested attack type to the list of strategy method names that
# will be looked up on PrivacyAttackStrategies later.
if not type or type == 'passive':
    strats = [
        'start_degree',
        'start_greedy',
        'start_crawler',
        'start_random'
    ]
elif type == 'triangles':
    strats = [
        'start_triangles',
        'start_greedy_triangles',
        'start_crawler_triangles',
        'start_crawler_seen_triangles'
    ]
elif type == 'no_degree':
    strats = [
        'start_crawler_seen_degree',
        'start_crawler_degree_hist',
        'start_crawler_degree_hist_bin_dist',
        'start_crawler_degree_hist_bin_dist_rand',
    ]
elif type == 'all':
    strats = [
        'start_crawler_random',
        'start_crawler_seen_degree',
        'start_crawler_degree_hist',
        'start_crawler_degree_hist_bin_dist',
        'start_crawler_degree_aprox_hist_bin_dist',
        # 'start_random',
        # 'start_degree',
        #'start_crawler_degree_hist_bin_dist_orderby_triangles',
        #'start_crawler_seen_triangles',
        #'start_crawler_degree_hist_bin_dist_rand',
        ##'start_greedy',
        #'start_crawler',
        #'start_triangles',
        ##'start_greedy_triangles',
        #'start_crawler_triangles',
    ]
elif type == 'test':
    strats = [
        'start_degree',
        'start_random',
        #'start_greedy',
        # BUG!!! Exception caused by a None node!
        # COMES FROM THE GRAPH BEING DISCONNECTED, WHICH MAKES NEXT_NODE_CRAWLER_UNSEEN_DEGREE RETURN NONE!!!
        #'start_crawler',
        #'start_triangles',
        # BUG!!! Infinite loop, it always bribes the same node!
        #'start_greedy_triangles',
        # Same for 'start_crawler_triangles',
        #'start_greedy_seen_degree',
        # Bug!!! Exception caused by a None node! 'start_crawler_seen_degree',
        'start_greedy_seen_triangles',
        #'start_crawler_seen_triangles',
        'start_crawler_degree_hist',
        'start_crawler_degree_hist_bin_dist',
        'start_crawler_degree_hist_bin_dist_rand',
    ]
elif type == 'supernode':
    lookaheads = [2,3]
    strats = ['start_supernode_degree', 'start_supernode_greedy', 'start_supernode_crawler', 'start_supernode_random']
else:
    raise Exception('Unsupported set of strategies!! %s' % type)

# Cap on bribed/attacked nodes as a fraction of the graph.
max_effort_fraction = 0.01

# runs
# passive korolova 100% DONE!
# no_degree link 100% DONE!
# triangles triangle 100% DONE!
# triangles2 triangle 70% DONE!

# old runs
# no_degree link 100% DONE!
# no_degree korolova 100% DONE!
# passive link 100% DONE!
# passive korolova 100% DONE!
# triangles triangle 90% DONE!
# triangles link 100% DONE!
# triangles korolova 100% DONE!
# no_degree triangle ERROR with start_greedy_seen_degree, no more nodes available, coverage still < 100% :(
# passive triangle ERROR with start_greedy_seen_degree, no more nodes available, coverage still < 100% :(
def coverage_map(func):
    """Translate a coverage-function short name into its metric attribute name."""
    mapping = {
        'node': 'node_coverage',
        'link': 'link_coverage',
        'korolova': 'complete_node_coverage',
        'complete_node': 'complete_node_coverage',
        'triangle': 'triangle_coverage',
    }
    name = mapping.get(func)
    if name is None:
        raise Exception('bad mapping from converage function to coverage name! %s' % str(func))
    return name
q = Queue()

def run(q, strategy, coverages, coverage_funcs, max_effort):
    """Worker entry point: drive one attack strategy and stream results to *q*.

    Each progress tuple from the strategy is formatted into a result line and
    pushed onto *q*; a 'FINISHED' sentinel is pushed when the strategy yields
    a negative cost.
    """
    estimator = TimeEstimator(len(coverages)*len(coverage_funcs))
    coverages = dict( [ (coverage_map(cov_func), coverages) for cov_func in coverage_funcs] )
    # BUG FIX: the original iterated the *global* `strat` instead of the
    # `strategy` parameter it was handed; that only worked because fork()
    # copied the parent's globals into the child. Use the parameter.
    for cost, coverage, coverage_type in strategy(coverages, max_effort):
        #print '%d %.2f %s %d' % (l, coverage, strat_name.replace('crawler','crawlr'), cost)
        if cost >= 0: # not finished yet...
            estimator.tick()
            log_line = estimator.log_line()
            # NOTE(review): `l`, `strat_name` and `graph` are still read from
            # module globals inherited via fork -- confirm before porting this
            # script to a spawn-based start method.
            q.put( '%d %.7f %s %s %d %f %s\n' % (l, coverage, coverage_type, strat_name.replace('crawler','crawlr'), cost, float(cost)/graph.number_of_nodes(), log_line) )
        else:
            q.put( 'FINISHED' )
create_dbs = True
if create_dbs:
    # Each (strategy, lookahead) worker gets its own private copy of the
    # graph database file, copied via the shell.
    for strat_name in strats:
        for l in lookaheads:
            new_filename = filename + '.%s.lookahead%d' % (strat_name, l)
            print 'creating BigGraph ' + new_filename
            os.system('cp %s %s' % (filename, new_filename))

# One shared random seed node so all strategies start from the same place.
print 'choosing random node seed for crawlers...'
graph = BigGraph(filename, cache_size_pages)
node_rand_seed = graph.random_nodes()[0]
del graph
print 'done. choosed -> %s' % str(node_rand_seed)

processors = len(strats)*len(lookaheads) # 4

out = open(outname,'w')

# Launch one worker process per (strategy, lookahead) combination.
processes = []
graphs = []
list_of_strategies = []
results = 0
for strat_name in strats:
    for l in lookaheads:
        new_filename = filename + '.%s.lookahead%d' % (strat_name, l)
        graph = BigGraph(new_filename, cache_size_pages)
        max_effort = graph.number_of_nodes() * max_effort_fraction #
        graph.debug = debug
        graph.input_debug_links = 200000
        graph.output_debug_nodes = 10000
        graph.max_links_input = 1*10**8
        graph.max_nodes_analysis = 100000000
        print 'PrivacyAttackStrategies with %s ...' % new_filename
        strategies = PrivacyAttackStrategies(graph, l, coverage_funcs, debug)
        strategies.node_rand_seed = node_rand_seed
        # Resolve the strategy generator by name; run() receives it as an
        # argument, and the globals set here are inherited by fork().
        strat = eval('strategies.%s' % strat_name)
        p = Process(target=run, args=(q,strat, coverages, coverage_funcs, max_effort))
        p.start()
        processes.append( p )
        graphs.append( graph )
        list_of_strategies.append( strategies )

#        results += len(coverages)*len(coverage_funcs)
#        if len(processes) >= processors: #len(strats)*len(lookaheads):
#            for _ in range(results):
#                res = q.get()
#                out.write(res)
#                out.flush()
#            map(lambda x: x.join(), processes)
#            processes = []
#            graphs = []
#            list_of_strategies = []
#            results = 0

# Drain the queue until every worker has sent its 'FINISHED' sentinel.
finished_processes = len(strats)*len(lookaheads)
while finished_processes > 0:
    res = q.get()
    if res != 'FINISHED':
        out.write(res)
        out.flush()
    else:
        finished_processes -= 1 # another process finished

out.close()
|
server.py | import socket
import threading
import random
import sched
import time
import sys
import numpy
import face_recognition
import mysql.connector
import communication_json
import insertdb
import utility
# Attendance codes are random integers drawn from [0, MAX_ACODE].
MAX_ACODE = 1000

# server info
server_ip = '192.168.152.108'
student_port = 60000
teacher_port = 60001
#updater_port = 60002

# database info
dbinfo = {'host': 'localhost',
          'user': 'root',
          'password': '',
          'port': 3306,
          'database': 'sas'}

# attendance closes automatically after 10 minutes if teacher doesn't close it
ATTENDANCE_TIMEOUT = 10 * 60
ATTENDANCE_TIMEOUT_CHECK = 10  # checks every 10 seconds for timeout of attendance
attendance_scheduler = sched.scheduler(time.time, time.sleep)

# class Attendance:
#    def __init__(self, tid, acode, aid):
##        self.tid = tid
##        self.acode = acode
##        self.aid = aid

# insert { classid:(teacherid, acode, aID) } to attendance active to start attendance
active_attendance = {}
# insert { classid: studentids[]} for student whose attendance is left to be shown to corresponding teacher
students_present = {}
def connect2db(_dbinfo=dbinfo):
    '''Open a connection to the MySQL database described by *_dbinfo*.

    Returns a (connection, cursor) pair. On failure the underlying
    mysql.connector.Error propagates to the caller, exactly as before --
    the original wrapped this in try/except only to re-raise unchanged,
    which has been removed as a no-op.
    '''
    mysqlconn = mysql.connector.connect(host=_dbinfo['host'], user=_dbinfo['user'],
                                        password=_dbinfo['password'],
                                        port=_dbinfo['port'], database=_dbinfo['database'])
    mycursor = mysqlconn.cursor()
    return mysqlconn, mycursor
def sendSQLserverError(conn):
    """Reply with a generic database-failure message and close *conn*."""
    communication_json.convertSendClose(
        {'error': 'Couldn\'t connect to attendance server. please try again after a moment'},
        conn)
def studentHandler(conn):
    """Handle one student attendance-marking request on *conn*.

    Validates the supplied 128-float face embedding, checks that the
    student's class has an open attendance with the right code, verifies
    the face against the embedding stored in the database, and records
    presence. Every path replies over *conn* and closes it.
    """
    data = communication_json.readall(conn)
    print(data['sid'])
    response = {}
    # A face embedding must be a 128-element vector; reject anything else.
    if data['face'] == None or len(data['face']) != 128:
        response['error'] = 'Face data not supplied'
        communication_json.convertSendClose(response, conn)
        return
    # find the class of the student
    class_query = 'SELECT cID FROM student WHERE sID = "{}"'.format(
        data['sid'])
    try:
        mysqlconn, mycursor = connect2db()
        try:
            mycursor.execute(class_query)
            res = mycursor.fetchone()
            if res == None:
                response['error'] = 'You are not registered for any class'
                communication_json.convertSendClose(response, conn)
                return
            data['cid'] = res[0]
        finally:
            mycursor.close()
    except mysql.connector.Error as e:
        print(e)
        sendSQLserverError(conn)
        return
    # Guard clauses: class must be taking attendance, code must match,
    # and the student must not already be marked.
    if not data['cid'] in active_attendance:
        response['error'] = 'Class is not taking attendance at the moment'
        communication_json.convertSendClose(response, conn)
        return
    if active_attendance[data['cid']][1] != data['acode']:
        response['error'] = 'Attendance code wrong'
        communication_json.convertSendClose(response, conn)
        return
    if data['sid'] in students_present[data['cid']]:
        response['error'] = 'Attendance already marked'
        communication_json.convertSendClose(response, conn)
        return
    try:
        mysqlconn, mycursor = connect2db()
        try:
            # Fetch the stored embedding components, ordered by index.
            student_facedata_query = 'SELECT embedding FROM facedata WHERE sID = "{}" ORDER BY `index`'.format(
                data['sid'])
            mycursor.execute(student_facedata_query)
            result = mycursor.fetchall()
            if len(result) == 0:
                response['error'] = 'Your face is not registered. Contact the administrator'
                # BUG FIX: the original set this error but never sent it or
                # returned, leaving the client without any response.
                communication_json.convertSendClose(response, conn)
                return
            if len(result) != 128:
                print('Face data insufficient in database')
                sendSQLserverError(conn)
                return
            facedata = [row[0] for row in result]
            # compare facedata
            match = face_recognition.compare_faces(
                [numpy.array(facedata)], numpy.array(data['face']))
            if not match[0]:
                response['error'] = 'Face didn\'t match. Please try again'
                communication_json.convertSendClose(response, conn)
                return
            # if face match then update
            mark_attendance_query = 'UPDATE record SET presence = true WHERE aID = {0} AND sID = "{1}"'.format(
                active_attendance[data['cid']][2], data['sid'])
            mycursor.execute(mark_attendance_query)
            mysqlconn.commit()
            # add student_id to students_present[];
            students_present[data['cid']].append(data['sid'])
            # send attendance status to 'socket' and save in database;
            response['success'] = 'Attendance marked'
            communication_json.convertSendClose(response, conn)
            return
        except mysql.connector.Error as e:
            sendSQLserverError(conn)
            return
        finally:
            mycursor.close()
    except mysql.connector.Error as e:
        sendSQLserverError(conn)
        return
def studentConnectionListen():
    """Accept student connections forever, one handler thread per client."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sl:
        # BUG FIX: SO_REUSEADDR must be set *before* bind() to have any
        # effect; the original set it afterwards, where it is too late.
        sl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sl.bind((server_ip, student_port))
        sl.listen()  # listen once; only accept() belongs in the loop
        while True:
            conn, addr = sl.accept()
            # one thread per student request
            t = threading.Thread(target=studentHandler, args=(conn,))
            t.start()
            print("Connected to student at: ", addr)
def removeClassFromAttendance(class_id):
    """Drop *class_id* from the active-attendance bookkeeping, if present."""
    if class_id not in active_attendance:
        return
    del active_attendance[class_id]  # stop accepting attendance for it
    del students_present[class_id]   # discard its present-students list
def attendanceTimeout():
    """Background loop that expires attendances whose timeout elapsed.

    scheduler.run() blocks until the event queue drains, so while timeout
    events are pending this thread sits inside run(); when the queue is
    empty it polls every ATTENDANCE_TIMEOUT_CHECK seconds for new events.
    """
    # see time of each started attendance in active_attendance and close if maximum time has elapsed
    while True:
        if attendance_scheduler.empty():
            time.sleep(ATTENDANCE_TIMEOUT_CHECK)
        else:
            attendance_scheduler.run()
# def teacherAttendanceLogFeedback(conn, class_id):
# if student_present[] is not empty send the student_ids and remove from the list
# close this thread if corresponding class_id has been removed from active attendance list
# return
##
# def getStudentList_json(class_id):
# get student list and their id from database for class_id
# save it in proper python structure
# return the json format data
# return
##
# def teacherExists(db_cursor):
# return
def getNewAttendanceCode(_ACODE=MAX_ACODE):
    """Return a random attendance code not used by any active attendance.

    Codes are drawn uniformly from [0, _ACODE]. The set of in-use codes is
    built once up front instead of rescanning active_attendance on every
    draw, as the original did.
    """
    in_use = {entry[1] for entry in active_attendance.values()}
    # NOTE(review): if every code in range were simultaneously in use this
    # would loop forever -- acceptable while MAX_ACODE far exceeds the
    # number of concurrently active classes.
    while True:
        acode = random.randint(0, _ACODE)
        if acode not in in_use:
            return acode
def teacherHandler(conn):
    """Handle a single teacher request on *conn*.

    The request's 'attendance' field selects one of five actions:
      'end'    -- stop the attendance this teacher started
      'start'  -- validate teacher/class/subject and open a new attendance
      'get'    -- list the students marked present so far
      'mark'   -- manually mark one student present
      'update' -- send the teacher's current class/subject lists
    Every path answers over *conn* and closes it.
    """
    data = communication_json.readall(conn)
    response = {}
    # check if attendance is in progress or not to start/stop attendance
    if data['attendance'] == 'end':
        # stopping attendance
        if data['cid'] not in active_attendance:
            response['error'] = 'No attendance in progress for the class'
            communication_json.convertSendClose(response, conn)
            return
        # same teacher must close attendance
        if active_attendance[data['cid']][0] != data['tid']:
            response['error'] = 'Another teacher started attendance for this class'
            communication_json.convertSendClose(response, conn)
            return
        # remove the class from active attendance list
        del active_attendance[data['cid']]
        del students_present[data['cid']]  # remove students list
        response['success'] = 'Attendance stopped'
        communication_json.convertSendClose(response, conn)
        return
    elif data['attendance'] == 'start':
        # starting attendance
        if data['cid'] in active_attendance:
            response['error'] = 'Attendance already started'
            communication_json.convertSendClose(response, conn)
            return
        # check the classid and teacherid in data are correct consulting database
        try:
            mysqlconn, mycursor = connect2db()
            try:
                teacher_exists_query = 'SELECT tID FROM sas.teacher WHERE tID = "{}"'.format(
                    data['tid'])
                mycursor.execute(teacher_exists_query)
                if mycursor.fetchone() is None:
                    response['error'] = 'You are not registered as teacher'
                    communication_json.convertSendClose(response, conn)
                    return
                class_exists_query = 'SELECT cID FROM sas.class WHERE cID = "{}"'.format(
                    data['cid'])
                mycursor.execute(class_exists_query)
                if mycursor.fetchone() is None:
                    response['error'] = 'Couldn\'t find class'
                    communication_json.convertSendClose(response, conn)
                    return
                subject_exists_query = 'SELECT scode FROM sas.subject WHERE scode = "{}"'.format(
                    data['scode'])
                # BUG FIX: the original executed class_exists_query again
                # here, so the subject-existence check never actually ran.
                mycursor.execute(subject_exists_query)
                if mycursor.fetchone() is None:
                    response['error'] = 'Couldn\'t find subject'
                    communication_json.convertSendClose(response, conn)
                    return
                # a unique attendance identifier for current session
                acode = getNewAttendanceCode()
                # send student list with studentid, name and attendance code
                classlist_query = 'SELECT sID, name FROM student WHERE cID = "{}"'.format(
                    data['cid'])
                mycursor.execute(classlist_query)
                result = mycursor.fetchall()
                response['student_list'] = result
                response['acode'] = acode
                response['timeout'] = 'The attendance will close automatically in {} minutes if not explicitly closed'.format(
                    ATTENDANCE_TIMEOUT/60)
                communication_json.convertSendClose(response, conn)
                sidlist = [r[0] for r in result]
                # make new attendance record in database
                attendanceid = insertdb.insertAttendance(
                    data['tid'], data['scode'], data['cid'])
                # adding records of students to the attendance for the respective class with default false for presence
                insertdb.insertRecords(attendanceid, sidlist)
                # add the classid:teacherid pair to active attendance list
                active_attendance[data['cid']] = (
                    data['tid'], acode, attendanceid)
                # initially no student present
                students_present[data['cid']] = []
                # stop attendance after timeout period if teacher doesn't close explicitly
                attendance_scheduler.enter(
                    ATTENDANCE_TIMEOUT, 0, removeClassFromAttendance, argument=(data['cid'],))
                # --- start new thread for attendance log feedback --- not applicable now
                # --- wait for attendance stop message from teacher client --- not applicable now
            except mysql.connector.Error as e:
                sendSQLserverError(conn)
                return
            finally:
                mycursor.close()
        except mysql.connector.Error as e:
            sendSQLserverError(conn)
            return
    elif data['attendance'] == 'get':
        # send list of students whose attendance has been marked
        if data['cid'] not in active_attendance:
            response['error'] = 'No attendance in progress for the class'
            communication_json.convertSendClose(response, conn)
            return
        # same teacher is only allowed to see realtime attendance data
        if active_attendance[data['cid']][0] != data['tid']:
            response['error'] = 'Another teacher started attendance for this class'
            communication_json.convertSendClose(response, conn)
            return
        response['student_list'] = students_present[data['cid']]
        communication_json.convertSendClose(response, conn)
        return
    elif data['attendance'] == 'mark':
        # manually mark one student present
        if data['cid'] not in active_attendance:
            response['error'] = 'No attendance in progress for the class'
            communication_json.convertSendClose(response, conn)
            return
        # same teacher is only allowed to mark a student present
        if active_attendance[data['cid']][0] != data['tid']:
            response['error'] = 'Another teacher started attendance for this class'
            communication_json.convertSendClose(response, conn)
            return
        if data['sid'] in students_present[data['cid']]:
            response['error'] = f'Attendance already marked for {data["sid"]}'
            communication_json.convertSendClose(response, conn)
            return
        try:
            mysqlconn, mycursor = connect2db()
            mark_attendance_query = 'UPDATE record SET presence = true WHERE aID = {0} AND sID = "{1}"'.format(
                active_attendance[data['cid']][2], data['sid'])
            try:
                mycursor.execute(mark_attendance_query)
                mysqlconn.commit()
                students_present[data['cid']].append(data['sid'])
                response['success'] = f'Attendance marked for {data["sid"]}'
                communication_json.convertSendClose(response, conn)
                return
            except mysql.connector.Error as e:
                # NOTE(review): an UPDATE matching zero rows does not raise,
                # so an unknown sID may not actually reach this branch.
                response['error'] = 'Student ID wrong'
                communication_json.convertSendClose(response, conn)
                return
            finally:
                # the original never closed this cursor (resource leak)
                mycursor.close()
        except mysql.connector.Error as e:
            sendSQLserverError(conn)
            return
    elif data['attendance'] == 'update':
        # though the key is 'attendance' it has nothing to do with attendance
        # this just sends updated list of class and subjects to teacher
        classSubjectUpdater(conn, data['tid'])
        return
def teacherConnectionListen():
    """Accept teacher client connections forever, one handler thread each.

    Binds to (server_ip, teacher_port) and never returns; it is run as a
    daemon thread from ``__main__``.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # SO_REUSEADDR must be set *before* bind() to have any effect;
        # the original set it afterwards, which silently did nothing.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((server_ip, teacher_port))
        # listen() marks the socket passive once; no need to re-call it
        # on every accept iteration.
        s.listen()
        while True:
            conn, addr = s.accept()
            # One handler thread per teacher connection.
            t = threading.Thread(target=teacherHandler, args=(conn,))
            t.start()
            print("Connected to teacher at: ", addr)
# class and subject data updater for teacher
def classSubjectUpdater(conn, tid):
    """Send the teacher's name plus their current class and subject lists.

    Invoked for the 'attendance': 'update' request; despite the key name it
    never touches attendance state.  Always replies on (and closes) ``conn``
    via ``communication_json.convertSendClose``.

    Args:
        conn: socket connection to the teacher client.
        tid: teacher ID string as supplied by the client (untrusted input).
    """
    response = {}
    if tid == '':
        response['error'] = "Please supply proper teacher id"
        communication_json.convertSendClose(response, conn)
        return
    # Verify the teacher exists and fetch the display name.
    mysqlconn, mycursor = connect2db()
    try:
        # tid comes straight off the network: use a parameterized query
        # instead of str.format to avoid SQL injection.  (The original also
        # quoted tid here but not in the queries below — now consistent.)
        mycursor.execute(
            'SELECT tID, name FROM sas.teacher WHERE tID = %s', (tid,))
        res = mycursor.fetchone()
        if res is None:
            response['error'] = 'You are not registered as teacher'
            communication_json.convertSendClose(response, conn)
            return
        response['teacher_name'] = res[1]
    except mysql.connector.Error as e:
        print(e)
        sendSQLserverError(conn)
        return
    finally:
        # Close cursor *and* connection (the original leaked the connection).
        mycursor.close()
        mysqlconn.close()
    try:
        mysqlconn, mycursor = connect2db()
        try:
            mycursor.execute(
                'SELECT cID, name FROM class INNER JOIN teaches USING (cID) '
                'WHERE tID = %s AND teaches.`sem` != 0', (tid,))
            response['class'] = mycursor.fetchall()
            mycursor.execute(
                'SELECT scode, name FROM subject INNER JOIN teaches '
                'USING (scode) WHERE tID = %s AND teaches.`sem` != 0', (tid,))
            response['subject'] = mycursor.fetchall()
            communication_json.convertSendClose(response, conn)
        except mysql.connector.Error as e:
            print(e)
            raise  # re-raised so the outer handler reports to the client
        finally:
            mycursor.close()
            mysqlconn.close()
    except mysql.connector.Error:
        sendSQLserverError(conn)
        return
# def UpdateConnectionListen():
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
## s.bind((server_ip, teacher_port))
# while(True):
# s.listen()
## conn, addr = s.accept()
# start new thread;
## t = threading.Thread(target= classSubjectUpdater, args = (conn,))
# t.start()
## print("Connected to teacher at: ", addr)
if __name__ == '__main__':
    # Spin up the three background services: teacher listener, student
    # listener, and the watchdog that stops attendance sessions a teacher
    # forgot to close.
    services = [
        threading.Thread(target=teacherConnectionListen),
        threading.Thread(target=studentConnectionListen),
        threading.Thread(target=attendanceTimeout),
    ]
    for service in services:
        service.daemon = True  # die together with the main thread
        service.start()
    # The main thread only waits for the operator to quit with 'q'/'Q'.
    while True:
        if input() in ('q', 'Q'):
            sys.exit()
|
testing_lib.py | #!/usr/bin/env python
import parallel
import multiprocessing
import time
import uuid
# max ms spent sending/receiving each job
TRANSPORT_MS = 50
def worker_addresses(ip_addresses):
    '''Returns a list of (vent, sink) pairs based on ip_addresses.

    Each address is expanded to its default vent and sink endpoints.
    '''
    # Comprehension instead of a manual append loop; the original also
    # shadowed the function name with its local accumulator.
    return [
        ('%s:%s' % (address, parallel.VENT_PORT_DEFAULT),
         '%s:%s' % (address, parallel.SINK_PORT_DEFAULT))
        for address in ip_addresses
    ]
def work(vent_port, sink_port, worker_pool):
    '''Creates one basic worker.'''
    def _discard_result(job_id, result, args):
        # This worker only executes jobs; results are ignored.
        pass

    ports = {'vent_port': vent_port, 'sink_port': sink_port}
    worker, _close, _run_job = parallel.construct_worker(worker_pool, ports)
    worker(_discard_result, ())
def pusher(vent_port, sink_port, worker_addresses, send_jobs, sender_args, on_recv_result, on_recv_result_args):
    '''A worker that also pushes jobs: sends them via send_jobs, then
    consumes results with on_recv_result.'''
    ports = {'vent_port': vent_port, 'sink_port': sink_port}
    worker, _close, run_job = parallel.construct_worker(worker_addresses, ports)
    send_jobs(run_job, sender_args)
    worker(on_recv_result, on_recv_result_args)
def construct_worker_pool(num, worker_addresses, send_jobs, sender_args, on_recv_result, on_recv_result_args, num_pushers=1):
    '''Constructs functions to start a pool of workers (some of which are also servers).'''
    processes = []

    def _spawn(target, args):
        # Launch one child process and remember it so kill() can stop it.
        p = multiprocessing.Process(target=target, args=args)
        p.start()
        processes.append(p)

    def start(start_port=5000):
        port = start_port
        # First the pusher processes (they also send jobs) ...
        for _ in range(num_pushers):
            _spawn(pusher, (port, port + 1, worker_addresses, send_jobs,
                            sender_args, on_recv_result, on_recv_result_args))
            port += 2
        # ... then the remaining plain workers, two ports per process.
        for _ in range(num - num_pushers):
            _spawn(work, (port, port + 1, worker_addresses))
            port += 2

    def kill():
        for p in processes:
            p.terminate()
    return start, kill
def check_for_completion(total_completed, num_jobs, timeout_ms):
    '''Returns True if multiprocessing.RawValue total_completed is equal to
    int num_jobs before timeout_ms have passed.

    Polls roughly once per millisecond instead of busy-spinning, so the
    waiting process no longer pegs a CPU core (the original spun flat out).
    '''
    deadline = time.time() + timeout_ms * 0.001
    while time.time() < deadline:
        if total_completed.value == num_jobs:
            return True
        time.sleep(0.001)  # yield the CPU between polls
    return False
|
database.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
These tests check the database is functioning properly,
both in memory and in its file
"""
import datetime
import functools
import multiprocessing
import os
import pytest
import json
try:
import uuid
_use_uuid = True
except ImportError:
_use_uuid = False
pass
from jsonschema import validate
import llnl.util.lock as lk
from llnl.util.tty.colify import colify
import spack.repo
import spack.store
import spack.database
import spack.package
import spack.spec
from spack.util.mock_package import MockPackageMultiRepo
from spack.util.executable import Executable
from spack.schema.database_index import schema
pytestmark = pytest.mark.db
@pytest.fixture()
def test_store(tmpdir):
    """Swap in a temporary spack store for the duration of a test."""
    real_store = spack.store.store
    spack.store.store = spack.store.Store(str(tmpdir.join('test_store')))
    yield
    # Restore the real store even on test failure (pytest resumes the
    # generator during teardown).
    spack.store.store = real_store
@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
    """Yield (upstream_write_db, upstream_db, upstream_layout,
    downstream_db, downstream_layout).

    Two Database handles share the upstream root: a writable one used to
    populate it, and a read-only ``is_upstream=True`` view that the
    downstream DB chains to.
    """
    mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
    upstream_write_db = spack.database.Database(mock_db_root)
    upstream_db = spack.database.Database(mock_db_root, is_upstream=True)
    # Generate initial DB file to avoid reindex
    with open(upstream_write_db._index_path, 'w') as db_file:
        upstream_write_db._write_to_file(db_file)
    upstream_layout = gen_mock_layout('/a/')
    downstream_db_root = str(
        tmpdir_factory.mktemp('mock_downstream_db_root'))
    downstream_db = spack.database.Database(
        downstream_db_root, upstream_dbs=[upstream_db])
    with open(downstream_db._index_path, 'w') as db_file:
        downstream_db._write_to_file(db_file)
    downstream_layout = gen_mock_layout('/b/')
    yield upstream_write_db, upstream_db, upstream_layout,\
        downstream_db, downstream_layout
@pytest.mark.usefixtures('config')
def test_installed_upstream(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
x = mock_repo.add_package('x', [], [])
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [default])
mock_repo.add_package('w', [x, y], [default, default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('w')
spec.concretize()
for dep in spec.traverse(root=False):
upstream_write_db.add(dep, upstream_layout)
upstream_db._read()
for dep in spec.traverse(root=False):
record = downstream_db.get_by_hash(dep.dag_hash())
assert record is not None
with pytest.raises(spack.database.ForbiddenLockError):
record = upstream_db.get_by_hash(dep.dag_hash())
new_spec = spack.spec.Spec('w')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
for dep in new_spec.traverse(root=False):
upstream, record = downstream_db.query_by_spec_hash(
dep.dag_hash())
assert upstream
assert record.path == upstream_layout.path_for_spec(dep)
upstream, record = downstream_db.query_by_spec_hash(
new_spec.dag_hash())
assert not upstream
assert record.installed
upstream_db._check_ref_counts()
downstream_db._check_ref_counts()
@pytest.mark.usefixtures('config')
def test_removed_upstream_dep(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
mock_repo.add_package('y', [z], [default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('y')
spec.concretize()
upstream_write_db.add(spec['z'], upstream_layout)
upstream_db._read()
new_spec = spack.spec.Spec('y')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
upstream_write_db.remove(new_spec['z'])
upstream_db._read()
new_downstream = spack.database.Database(
downstream_db.root, upstream_dbs=[upstream_db])
new_downstream._fail_when_missing_deps = True
with pytest.raises(spack.database.MissingDependenciesError):
new_downstream._read()
@pytest.mark.usefixtures('config')
def test_add_to_upstream_after_downstream(upstream_and_downstream_db):
"""An upstream DB can add a package after it is installed in the downstream
DB. When a package is recorded as installed in both, the results should
refer to the downstream DB.
"""
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
mock_repo = MockPackageMultiRepo()
mock_repo.add_package('x', [], [])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
downstream_db.add(spec, downstream_layout)
upstream_write_db.add(spec, upstream_layout)
upstream_db._read()
upstream, record = downstream_db.query_by_spec_hash(spec.dag_hash())
# Even though the package is recorded as installed in the upstream DB,
# we prefer the locally-installed instance
assert not upstream
qresults = downstream_db.query('x')
assert len(qresults) == 1
queried_spec, = qresults
try:
orig_db = spack.store.db
spack.store.db = downstream_db
assert queried_spec.prefix == downstream_layout.path_for_spec(spec)
finally:
spack.store.db = orig_db
@pytest.mark.usefixtures('config')
def test_cannot_write_upstream(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/']]
mock_repo = MockPackageMultiRepo()
mock_repo.add_package('x', [], [])
# Instantiate the database that will be used as the upstream DB and make
# sure it has an index file
upstream_db_independent = spack.database.Database(roots[1])
with upstream_db_independent.write_transaction():
pass
upstream_dbs = spack.store._construct_upstream_dbs_from_install_roots(
[roots[1]], _test=True)
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
with pytest.raises(spack.database.ForbiddenLockError):
upstream_dbs[0].add(spec, layouts[1])
@pytest.mark.usefixtures('config')
def test_recursive_upstream_dbs(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b', 'c']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/', '/rc/']]
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [default])
mock_repo.add_package('x', [y], [default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
db_c = spack.database.Database(roots[2])
db_c.add(spec['z'], layouts[2])
db_b = spack.database.Database(roots[1], upstream_dbs=[db_c])
db_b.add(spec['y'], layouts[1])
db_a = spack.database.Database(roots[0], upstream_dbs=[db_b, db_c])
db_a.add(spec['x'], layouts[0])
upstream_dbs_from_scratch = (
spack.store._construct_upstream_dbs_from_install_roots(
[roots[1], roots[2]], _test=True))
db_a_from_scratch = spack.database.Database(
roots[0], upstream_dbs=upstream_dbs_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec.dag_hash()) == (
db_a_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec['y'].dag_hash()) == (
upstream_dbs_from_scratch[0])
assert db_a_from_scratch.db_for_spec_hash(spec['z'].dag_hash()) == (
upstream_dbs_from_scratch[1])
db_a_from_scratch._check_ref_counts()
upstream_dbs_from_scratch[0]._check_ref_counts()
upstream_dbs_from_scratch[1]._check_ref_counts()
assert (db_a_from_scratch.installed_relatives(spec) ==
set(spec.traverse(root=False)))
assert (db_a_from_scratch.installed_relatives(
spec['z'], direction='parents') == set([spec, spec['y']]))
@pytest.fixture()
def usr_folder_exists(monkeypatch):
    """The ``/usr`` folder is assumed to be existing in some tests. This
    fixture makes it such that its existence is mocked, so we have no
    requirements on the system running tests.
    """
    # Capture the real implementation before patching so the mock can
    # delegate for every other path.
    isdir = os.path.isdir

    @functools.wraps(os.path.isdir)
    def mock_isdir(path):
        # Only '/usr' is special-cased; everything else uses the real check.
        if path == '/usr':
            return True
        return isdir(path)

    monkeypatch.setattr(os.path, 'isdir', mock_isdir)
def _print_ref_counts():
    """Print out all ref counts for the graph used here, for debugging"""
    recs = []

    def add_rec(spec):
        # Look up the spec (installed or not); format one table row of
        # "[ hash7 ] spec  ref_count", or '-' when it is not in the DB.
        cspecs = spack.store.db.query(spec, installed=any)
        if not cspecs:
            recs.append("[ %-7s ] %-20s-" % ('', spec))
        else:
            key = cspecs[0].dag_hash()
            rec = spack.store.db.get_record(cspecs[0])
            recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))

    # One read transaction around all lookups so the counts are consistent.
    with spack.store.db.read_transaction():
        add_rec('mpileaks ^mpich')
        add_rec('callpath ^mpich')
        add_rec('mpich')
        add_rec('mpileaks ^mpich2')
        add_rec('callpath ^mpich2')
        add_rec('mpich2')
        add_rec('mpileaks ^zmpi')
        add_rec('callpath ^zmpi')
        add_rec('zmpi')
        add_rec('fake')
        add_rec('dyninst')
        add_rec('libdwarf')
        add_rec('libelf')
    colify(recs, cols=3)
def _check_merkleiness():
    """Ensure the spack database is a valid merkle graph: every dag hash
    maps to exactly one in-memory spec object."""
    seen = {}
    for spec in spack.store.db.query(installed=any):
        for dep in spec.dependencies():
            key = dep.dag_hash()
            # setdefault stores id(dep) on first sight and returns the
            # stored id afterwards, so duplicates must be the same object.
            assert seen.setdefault(key, id(dep)) == id(dep)
def _check_db_sanity(database):
    """Utility function to check db against install layout."""
    pkg_in_layout = sorted(spack.store.layout.all_specs())
    actual = sorted(database.query())
    # Externals are recorded in the DB but have no layout directory, so the
    # DB should hold exactly the layout specs plus the externals.
    externals = sorted([x for x in actual if x.external])
    nexpected = len(pkg_in_layout) + len(externals)
    assert nexpected == len(actual)
    non_external_in_db = sorted([x for x in actual if not x.external])
    # Both lists are sorted, so pairwise comparison checks set equality.
    for e, a in zip(pkg_in_layout, non_external_in_db):
        assert e == a
    _check_merkleiness()
def _check_remove_and_add_package(database, spec):
    """Remove a spec from the DB, then add it and make sure everything's
    still ok once it is added. This checks that it was
    removed, that it's back when added again, and that ref
    counts are consistent.
    """
    original = database.query()
    database._check_ref_counts()

    # Remove spec
    concrete_spec = database.remove(spec)
    database._check_ref_counts()
    remaining = database.query()

    # ensure spec we removed is gone
    assert len(original) - 1 == len(remaining)
    assert all(s in original for s in remaining)
    assert concrete_spec not in remaining

    # add it back and make sure everything is ok.
    database.add(concrete_spec, spack.store.layout)
    installed = database.query()
    assert concrete_spec in installed
    assert installed == original

    # sanity check against directory layout and check ref counts.
    _check_db_sanity(database)
    database._check_ref_counts()
def _mock_install(spec):
    """Concretize the given spec string and fake-install its package."""
    concrete = spack.spec.Spec(spec)
    concrete.concretize()
    spack.repo.get(concrete).do_install(fake=True)
def _mock_remove(spec):
    """Uninstall the single installed spec matching the given query."""
    matches = spack.store.db.query(spec)
    assert len(matches) == 1  # query must be unambiguous
    target = matches[0]
    target.package.do_uninstall(target)
def test_default_queries(database):
# Testing a package whose name *doesn't* start with 'lib'
# to ensure the library has 'lib' prepended to the name
rec = database.get_record('zmpi')
spec = rec.spec
libraries = spec['zmpi'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'zmpi'
headers = spec['zmpi'].headers
assert len(headers) == 1
assert headers.names[0] == 'zmpi'
command = spec['zmpi'].command
assert isinstance(command, Executable)
assert command.name == 'zmpi'
assert os.path.exists(command.path)
# Testing a package whose name *does* start with 'lib'
# to ensure the library doesn't have a double 'lib' prefix
rec = database.get_record('libelf')
spec = rec.spec
libraries = spec['libelf'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'elf'
headers = spec['libelf'].headers
assert len(headers) == 1
assert headers.names[0] == 'libelf'
command = spec['libelf'].command
assert isinstance(command, Executable)
assert command.name == 'libelf'
assert os.path.exists(command.path)
def test_005_db_exists(database):
"""Make sure db cache file exists after creating."""
index_file = os.path.join(database.root, '.spack-db', 'index.json')
lock_file = os.path.join(database.root, '.spack-db', 'lock')
assert os.path.exists(str(index_file))
assert os.path.exists(str(lock_file))
with open(index_file) as fd:
index_object = json.load(fd)
validate(index_object, schema)
def test_010_all_install_sanity(database):
"""Ensure that the install layout reflects what we think it does."""
all_specs = spack.store.layout.all_specs()
assert len(all_specs) == 14
# Query specs with multiple configurations
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# Query specs with single configurations
dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich2')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^zmpi')]
) == 1
def test_015_write_and_read(mutable_database):
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_017_write_and_read_without_uuid(mutable_database, monkeypatch):
monkeypatch.setattr(spack.database, '_use_uuid', False)
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_020_db_sanity(database):
"""Make sure query() returns what's actually in the db."""
_check_db_sanity(database)
def test_025_reindex(mutable_database):
"""Make sure reindex works and ref counts are valid."""
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_026_reindex_after_deprecate(mutable_database):
"""Make sure reindex works and ref counts are valid after deprecation."""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_030_db_sanity_from_another_process(mutable_database):
    """A second process can read the DB and commit a write visible here."""
    def read_and_modify():
        # check that other process can read DB
        _check_db_sanity(mutable_database)
        with mutable_database.write_transaction():
            _mock_remove('mpileaks ^zmpi')

    p = multiprocessing.Process(target=read_and_modify, args=())
    p.start()
    p.join()
    # join() does not propagate child exceptions, so an assertion failure in
    # read_and_modify would previously pass silently — check the exit code.
    assert p.exitcode == 0

    # ensure child process change is visible in parent process
    with mutable_database.read_transaction():
        assert len(mutable_database.query('mpileaks ^zmpi')) == 0
def test_040_ref_counts(database):
"""Ensure that we got ref counts right when we read the DB."""
database._check_ref_counts()
def test_041_ref_counts_deprecate(mutable_database):
"""Ensure that we have appropriate ref counts after deprecating"""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
mutable_database._check_ref_counts()
def test_050_basic_query(database):
"""Ensure querying database is consistent with what is installed."""
# query everything
assert len(spack.store.db.query()) == 16
# query specs with multiple configurations
mpileaks_specs = database.query('mpileaks')
callpath_specs = database.query('callpath')
mpi_specs = database.query('mpi')
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# query specs with single configurations
dyninst_specs = database.query('dyninst')
libdwarf_specs = database.query('libdwarf')
libelf_specs = database.query('libelf')
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(database.query('mpileaks ^mpich')) == 1
assert len(database.query('mpileaks ^mpich2')) == 1
assert len(database.query('mpileaks ^zmpi')) == 1
# Query by date
assert len(database.query(start_date=datetime.datetime.min)) == 16
assert len(database.query(start_date=datetime.datetime.max)) == 0
assert len(database.query(end_date=datetime.datetime.min)) == 0
assert len(database.query(end_date=datetime.datetime.max)) == 16
def test_060_remove_and_add_root_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'dyninst')
def test_080_root_ref_counts(mutable_database):
rec = mutable_database.get_record('mpileaks ^mpich')
# Remove a top-level spec from the DB
mutable_database.remove('mpileaks ^mpich')
# record no longer in DB
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
# record's deps have updated ref_counts
assert mutable_database.get_record('callpath ^mpich').ref_count == 0
assert mutable_database.get_record('mpich').ref_count == 1
# Put the spec back
mutable_database.add(rec.spec, spack.store.layout)
# record is present again
assert len(mutable_database.query('mpileaks ^mpich', installed=any)) == 1
# dependencies have ref counts updated
assert mutable_database.get_record('callpath ^mpich').ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
def test_090_non_root_ref_counts(mutable_database):
mutable_database.get_record('mpileaks ^mpich')
mutable_database.get_record('callpath ^mpich')
# "force remove" a non-root spec from the DB
mutable_database.remove('callpath ^mpich')
# record still in DB but marked uninstalled
assert mutable_database.query('callpath ^mpich', installed=True) == []
assert len(mutable_database.query('callpath ^mpich', installed=any)) == 1
# record and its deps have same ref_counts
assert mutable_database.get_record(
'callpath ^mpich', installed=any
).ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
# remove only dependent of uninstalled callpath record
mutable_database.remove('mpileaks ^mpich')
# record and parent are completely gone.
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
assert mutable_database.query('callpath ^mpich', installed=any) == []
# mpich ref count updated properly.
mpich_rec = mutable_database.get_record('mpich')
assert mpich_rec.ref_count == 0
def test_100_no_write_with_exception_on_remove(database):
def fail_while_writing():
with database.write_transaction():
_mock_remove('mpileaks ^zmpi')
raise Exception()
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure zmpi is still there.
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
def test_110_no_write_with_exception_on_install(database):
def fail_while_writing():
with database.write_transaction():
_mock_install('cmake')
raise Exception()
with database.read_transaction():
assert database.query('cmake', installed=any) == []
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure cmake was not written.
with database.read_transaction():
assert database.query('cmake', installed=any) == []
def test_115_reindex_with_packages_not_in_repo(mutable_database):
# Dont add any package definitions to this repository, the idea is that
# packages should not have to be defined in the repository once they
# are installed
with spack.repo.swap(MockPackageMultiRepo()):
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_external_entries_in_db(mutable_database):
rec = mutable_database.get_record('mpileaks ^zmpi')
assert rec.spec.external_path is None
assert rec.spec.external_module is None
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert rec.spec.external_module is None
assert rec.explicit is False
rec.spec.package.do_install(fake=True, explicit=True)
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert rec.spec.external_module is None
assert rec.explicit is True
@pytest.mark.regression('8036')
def test_regression_issue_8036(mutable_database, usr_folder_exists):
# The test ensures that the external package prefix is treated as
# existing. Even when the package prefix exists, the package should
# not be considered installed until it is added to the database with
# do_install.
s = spack.spec.Spec('externaltool@0.9')
s.concretize()
assert not s.package.installed
# Now install the external package and check again the `installed` property
s.package.do_install(fake=True)
assert s.package.installed
@pytest.mark.regression('11118')
def test_old_external_entries_prefix(mutable_database):
with open(spack.store.db._index_path, 'r') as f:
db_obj = json.loads(f.read())
validate(db_obj, schema)
s = spack.spec.Spec('externaltool')
s.concretize()
db_obj['database']['installs'][s.dag_hash()]['path'] = 'None'
with open(spack.store.db._index_path, 'w') as f:
f.write(json.dumps(db_obj))
if _use_uuid:
with open(spack.store.db._verifier_path, 'w') as f:
f.write(str(uuid.uuid4()))
record = spack.store.db.get_record(s)
assert record.path is None
assert record.spec._prefix is None
assert record.spec.prefix == record.spec.external_path
def test_uninstall_by_spec(mutable_database):
with mutable_database.write_transaction():
for spec in mutable_database.query():
if spec.package.installed:
spack.package.PackageBase.uninstall_by_spec(spec, force=True)
else:
mutable_database.remove(spec)
assert len(mutable_database.query()) == 0
def test_query_unused_specs(mutable_database):
# This spec installs a fake cmake as a build only dependency
s = spack.spec.Spec('simple-inheritance')
s.concretize()
s.package.do_install(fake=True, explicit=True)
unused = spack.store.db.unused_specs
assert len(unused) == 1
assert unused[0].name == 'cmake'
@pytest.mark.regression('10019')
def test_query_spec_with_conditional_dependency(mutable_database):
# The issue is triggered by having dependencies that are
# conditional on a Boolean variant
s = spack.spec.Spec('hdf5~mpi')
s.concretize()
s.package.do_install(fake=True, explicit=True)
results = spack.store.db.query_local('hdf5 ^mpich')
assert not results
@pytest.mark.regression('10019')
def test_query_spec_with_non_conditional_virtual_dependency(database):
# Ensure the same issue doesn't come up for virtual
# dependency that are not conditional on variants
results = spack.store.db.query_local('mpileaks ^mpich')
assert len(results) == 1
def test_failed_spec_path_error(database):
"""Ensure spec not concrete check is covered."""
s = spack.spec.Spec('a')
with pytest.raises(ValueError, match='Concrete spec required'):
spack.store.db._failed_spec_path(s)
@pytest.mark.db
def test_clear_failure_keep(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when to be retained."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
s = spack.spec.Spec('a')
spack.store.db.clear_failure(s)
out = capfd.readouterr()[0]
assert 'Retaining failure marking' in out
@pytest.mark.db
def test_clear_failure_forced(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when force."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
# Ensure raise OSError when try to remove the non-existent marking
monkeypatch.setattr(spack.database.Database, 'prefix_failure_marked', _is)
s = spack.spec.Spec('a').concretized()
spack.store.db.clear_failure(s, force=True)
out = capfd.readouterr()[1]
assert 'Removing failure marking despite lock' in out
assert 'Unable to remove failure marking' in out
@pytest.mark.db
def test_mark_failed(mutable_database, monkeypatch, tmpdir, capsys):
"""Add coverage to mark_failed."""
def _raise_exc(lock):
raise lk.LockTimeoutError('Mock acquire_write failure')
# Ensure attempt to acquire write lock on the mark raises the exception
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise_exc)
with tmpdir.as_cwd():
s = spack.spec.Spec('a').concretized()
spack.store.db.mark_failed(s)
out = str(capsys.readouterr()[1])
assert 'Unable to mark a as failed' in out
# Clean up the failure mark to ensure it does not interfere with other
# tests using the same spec.
del spack.store.db._prefix_failures[s.prefix]
@pytest.mark.db
def test_prefix_failed(mutable_database, monkeypatch):
"""Add coverage to prefix_failed operation."""
def _is(db, spec):
return True
s = spack.spec.Spec('a').concretized()
# Confirm the spec is not already marked as failed
assert not spack.store.db.prefix_failed(s)
# Check that a failure entry is sufficient
spack.store.db._prefix_failures[s.prefix] = None
assert spack.store.db.prefix_failed(s)
# Remove the entry and check again
del spack.store.db._prefix_failures[s.prefix]
assert not spack.store.db.prefix_failed(s)
# Now pretend that the prefix failure is locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
assert spack.store.db.prefix_failed(s)
def test_prefix_read_lock_error(mutable_database, monkeypatch):
"""Cover the prefix read lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_read', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_read_lock(s):
assert False
def test_prefix_write_lock_error(mutable_database, monkeypatch):
"""Cover the prefix write lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_write_lock(s):
assert False
|
variable_scope.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
import copy
import enum
import functools
import sys
import threading
import traceback
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
# Public API of this module; everything else is internal.
__all__ = [
    "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
    "get_local_variable", "variable_scope", "variable_op_scope",
    "no_regularizer", "VariableSynchronization", "VariableAggregation"
]

# Telemetry gauge flipped by enable/disable_resource_variables() below.
_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/resource_variables",
    "Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo:
"""Holds partition info used by initializer functions."""
__slots__ = ["_full_shape", "_var_offset"]
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, (list, tuple)):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, (list, tuple)):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for offset, shape in zip(var_offset, full_shape):
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, (tuple, list)):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in range(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in range(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
class _ReuseMode(enum.Enum):
  """Mode for variable access within a variable scope."""

  # Indicates that variables are to be fetched if they already exist or
  # otherwise created.
  AUTO_REUSE = 1

  # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
  # enum values.
  # REUSE_FALSE = 2
  # REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
@compatibility(TF2)
`tf.compat.v1.AUTO_REUSE` is a legacy API that is a no-op when TF2 behaviors
are enabled.
If you rely on `get_variable` and auto-reuse, see the
[model mapping guide](https://www.tensorflow.org/guide/migrate/model_mapping)
for more info on how to migrate your code.
Note: when you use the `tf.compat.v1.keras.utils.track_tf1_style_variables`
API as described in the above guide, `get_variable` will always behave as if
`v1.AUTO_REUSE` is set. Without the decorator, reuse will be ignored and new
variables will always be created, regardless of if they have already been
created.
@end_compatibility
When passed in as the value for the `reuse` flag, `AUTO_REUSE` indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
  """Creates resource variables by default.

  Resource variables are improved versions of TensorFlow variables with a
  well-defined memory model. Accessing a resource variable reads its value, and
  all ops which access a specific read value of the variable are guaranteed to
  see the same value for that tensor. Writes which happen after a read (by
  having a control or data dependency on the read) are guaranteed not to affect
  the value of the read tensor, and similarly writes which happen before a read
  are guaranteed to affect the value. No guarantees are made about unordered
  read/write pairs.

  Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
  feature.
  """
  global _DEFAULT_USE_RESOURCE
  # Flip the module-level default read by get_variable()/variable_scope.
  _DEFAULT_USE_RESOURCE = True
  logging.vlog(1, "Enabling resource variables")
  # Record the opt-in for API usage telemetry.
  _api_usage_gauge.get_cell().set(True)
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
  """Returns `True` if resource variables are enabled.

  Resource variables are improved versions of TensorFlow variables with a
  well-defined memory model. Accessing a resource variable reads its value, and
  all ops which access a specific read value of the variable are guaranteed to
  see the same value for that tensor. Writes which happen after a read (by
  having a control or data dependency on the read) are guaranteed not to affect
  the value of the read tensor, and similarly writes which happen before a read
  are guaranteed to affect the value. No guarantees are made about unordered
  read/write pairs.

  Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
  feature.
  """
  # Reading the module-level flag needs no `global` declaration.
  return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
    None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
  """Opts out of resource variables.

  If your code needs tf.disable_resource_variables() to be called to work
  properly please file a bug.
  """
  global _DEFAULT_USE_RESOURCE
  # Flip the module-level default read by get_variable()/variable_scope.
  _DEFAULT_USE_RESOURCE = False
  logging.vlog(1, "Disabling resource variables")
  # Record the opt-out for API usage telemetry.
  _api_usage_gauge.get_cell().set(False)
def _needs_no_arguments(python_callable):
  """Returns true if the callable needs no arguments to call.

  A callable needs no arguments when every positional argument it accepts has
  a default value.

  Args:
    python_callable: The callable (function, method, or `functools.partial`)
      to inspect.

  Returns:
    `True` if `python_callable` can be invoked without any arguments.
  """
  # TODO(bfontain): Switch to inspect.signature when we are python 3 only.
  # signature = inspect.signature(python_callable)
  # return not [1 for param in signature.parameters.values()
  #             if param.default == param.empty]
  # Inspect once and reuse the result (the original called getargspec twice).
  arg_spec = tf_inspect.getargspec(python_callable)
  num_arguments = len(arg_spec.args)
  if not tf_inspect.isfunction(python_callable) and not isinstance(
      python_callable, functools.partial):
    # getargspec includes self for function objects (which aren't
    # functools.partial). This has no default so we need to remove it.
    # It is not even an argument so its odd that getargspec returns this.
    # Note that this is fixed with inspect.signature in Python 3.
    num_arguments -= 1
  # No arguments are required iff every remaining argument has a default.
  return num_arguments == len(arg_spec.defaults or [])
class _VariableStore:
  """Variable store that carries a number of named Variables.

  New variable names and new variables can be created; all stored
  variables are initialized with the initializer passed to __init__.

  Attributes:
    vars: a dictionary with string names (same as passed in GetVar) as keys and
      the corresponding TensorFlow Variables as values.
  """

  # Instances carry no __dict__: only these three attributes exist.
  __slots__ = ["_vars", "_partitioned_vars", "_store_eager_variables"]

  def __init__(self):
    """Create a variable store."""
    self._vars = {}  # A dictionary of the stored TensorFlow variables.
    self._partitioned_vars = {}  # A dict of the stored PartitionedVariables.
    # When True, variables are recorded even under eager execution so they
    # can be reused later — presumably set by EagerVariableStore; confirm.
    self._store_eager_variables = False
def get_variable(self,
                 name,
                 shape=None,
                 dtype=dtypes.float32,
                 initializer=None,
                 regularizer=None,
                 reuse=None,
                 trainable=None,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
  """Gets an existing variable with these parameters or create a new one.

  If a variable with the given name is already stored, we return the stored
  variable. Otherwise, we create a new one.

  Set `reuse` to `True` when you only want to reuse existing Variables.
  Set `reuse` to `False` when you only want to create new Variables.
  Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
  variables to be created if they don't exist or returned if they do.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  If a partitioner is provided, a `PartitionedVariable` is returned.
  Accessing this object as a `Tensor` returns the shards concatenated along
  the partition axis.

  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
      variables. When eager execution is enabled this argument is always
      forced to be False.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
      defaults to `True`, unless `synchronization` is set to `ON_READ`, in
      which case it defaults to `False`.
    collections: List of graph collections keys to add the `Variable` to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the `Variable` reside, to
      deduplicate copying through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and dtype of the `Variable` to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known.
    use_resource: If False, creates a regular Variable. If True, creates
      instead an experimental ResourceVariable which has well-defined
      semantics. Defaults to False (will later change to True). When eager
      execution is enabled this argument is always forced to be true.
    custom_getter: Callable that takes as a first argument the true getter,
      and allows overwriting the internal get_variable method. The signature
      of `custom_getter` should match that of this method,
      but the most future-proof version will allow for changes: `def
      custom_getter(getter, *args, **kwargs)`. Direct access to
      all `get_variable` parameters is also allowed: `def
      custom_getter(getter, name, *args, **kwargs)`. A simple identity
      custom getter that simply creates variables with modified names is:
      ```python
      def custom_getter(getter, name, *args, **kwargs): return getter(name +
        '_suffix', *args, **kwargs) ```
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to
      synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  Returns:
    The created or existing `Variable` (or `PartitionedVariable`, if a
    partitioner was used).

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      when reusing a variable and specifying a conflicting shape,
      or when violating reuse during variable creation.
    RuntimeError: when eager execution is enabled and not called from an
      EagerVariableStore.
  """
  if custom_getter is not None and not callable(custom_getter):
    raise ValueError("Passed a custom_getter which is not callable: %s" %
                     custom_getter)
  with ops.init_scope():
    if context.executing_eagerly():
      # Variable creation and initialization takes place in `init_scope`s;
      # as such, if an `init_scope` lifts us into the eager context, then we
      # need to use `ResourceVariable`s.
      use_resource = True

  # Note that it's fine to reuse eager variables whose initialization was
  # lifted from a function-building graph into the eager context (that's why
  # the following clause is not wrapped in an `init_scope`); lifted variables
  # are tracked by the graph's `VariableStore`.
  if context.executing_eagerly():
    if not self._store_eager_variables and reuse:
      raise RuntimeError(
          "When eager execution is enabled variable reuse is only supported"
          " when an EagerVariableStore is active. See the documentation on"
          " EagerVariableStore for example usage.")
    if self._store_eager_variables:
      reuse = AUTO_REUSE

  # If a *_ref type is passed in an error would be triggered further down the
  # stack. We prevent this using base_dtype to get a non-ref version of the
  # type, before doing anything else. When _ref types are removed in favor of
  # resources, this line can be removed.
  try:
    dtype = dtype.base_dtype
  except AttributeError:
    # .base_dtype not existing means that we will try and use the raw dtype
    # which was passed in - this might be a NumPy type which is valid.
    pass

  # This is the main logic of get_variable. However, custom_getter
  # may override this logic. So we save it as a callable and pass
  # it to custom_getter.
  # Note: the parameters of _true_getter, and their documentation, match
  # *exactly* item-for-item with the docstring of this method.
  def _true_getter(  # pylint: disable=missing-docstring
      name,
      shape=None,
      dtype=dtypes.float32,
      initializer=None,
      regularizer=None,
      reuse=None,
      trainable=None,
      collections=None,
      caching_device=None,
      partitioner=None,
      validate_shape=True,
      use_resource=None,
      constraint=None,
      synchronization=VariableSynchronization.AUTO,
      aggregation=VariableAggregation.NONE):
    # An empty-sequence shape denotes a scalar; partitioners don't apply.
    is_scalar = (
        shape is not None and isinstance(shape, collections_abc.Sequence) and
        not shape)
    # Partitioned variable case
    if partitioner is not None and not is_scalar:
      if not callable(partitioner):
        raise ValueError("Partitioner must be callable, but received: %s" %
                         partitioner)
      with ops.name_scope(None):
        return self._get_partitioned_variable(
            name=name,
            shape=shape,
            dtype=dtype,
            initializer=initializer,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            partitioner=partitioner,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint,
            synchronization=synchronization,
            aggregation=aggregation)

    # Special case for partitioned variable to allow reuse without having to
    # specify partitioner.
    if (reuse is True and partitioner is None
        and name in self._partitioned_vars):
      return self._get_partitioned_variable(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=None,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

    # Single variable case
    if "%s/part_0" % name in self._vars:
      raise ValueError(
          "No partitioner was provided, but a partitioned version of the "
          "variable was found: %s/part_0. Perhaps a variable of the same "
          "name was already created with partitioning?" % name)

    return self._get_single_variable(
        name=name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        regularizer=regularizer,
        reuse=reuse,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        validate_shape=validate_shape,
        use_resource=use_resource,
        constraint=constraint,
        synchronization=synchronization,
        aggregation=aggregation)

  # Normalize/validate the synchronization-aggregation-trainable triple
  # before dispatching (e.g. ON_READ forces trainable to default to False).
  synchronization, aggregation, trainable = (
      variables.validate_synchronization_aggregation_trainable(
          synchronization, aggregation, trainable, name))

  if custom_getter is not None:
    # Handle backwards compatibility with getter arguments that were added
    # to the API after users started writing custom getters.
    custom_getter_kwargs = {
        "getter": _true_getter,
        "name": name,
        "shape": shape,
        "dtype": dtype,
        "initializer": initializer,
        "regularizer": regularizer,
        "reuse": reuse,
        "trainable": trainable,
        "collections": collections,
        "caching_device": caching_device,
        "partitioner": partitioner,
        "validate_shape": validate_shape,
        "use_resource": use_resource,
        "synchronization": synchronization,
        "aggregation": aggregation,
    }
    # `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
    # `lambda`.
    # Only pass `constraint` to getters whose signature can accept it, so
    # older custom getters keep working.
    if ("constraint" in function_utils.fn_args(custom_getter) or
        function_utils.has_kwargs(custom_getter)):
      custom_getter_kwargs["constraint"] = constraint
    return custom_getter(**custom_getter_kwargs)
  else:
    return _true_getter(
        name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        regularizer=regularizer,
        reuse=reuse,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        partitioner=partitioner,
        validate_shape=validate_shape,
        use_resource=use_resource,
        constraint=constraint,
        synchronization=synchronization,
        aggregation=aggregation)
def _get_partitioned_variable(self,
                              name,
                              partitioner,
                              shape=None,
                              dtype=dtypes.float32,
                              initializer=None,
                              regularizer=None,
                              reuse=None,
                              trainable=None,
                              collections=None,
                              caching_device=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None,
                              synchronization=VariableSynchronization.AUTO,
                              aggregation=VariableAggregation.NONE):
  """Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension. That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1. Currently, sharding along
  only one axis is supported.

  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.

  Set `reuse` to `True` when you only want to reuse existing Variables.
  Set `reuse` to `False` when you only want to create new Variables.
  Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
  variables to be created if they don't exist or returned if they do.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  If the initializer is a callable, then it will be called for each
  shard. Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.

  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: the name of the new or existing sharded variable.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    shape: shape of the new or existing sharded variable.
    dtype: type of the new or existing sharded variable (defaults to
      `DT_FLOAT`).
    initializer: initializer for the sharded variable.
    regularizer: a (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
      variables.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable which has well-defined semantics. Defaults
      to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to
      synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  Returns:
    A `PartitionedVariable` object.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      when reusing a variable and specifying a conflicting shape,
      when violating reuse during variable creation, or if an existing
      sharded variable exists for the given name but with different sharding.
  """
  initializing_from_value = initializer is not None and isinstance(
      initializer, ops.Tensor)
  if name in self._vars:
    raise ValueError(
        "A partitioner was provided, but an unpartitioned version of the "
        "variable was found: %s. Perhaps a variable of the same name was "
        "already created without partitioning?" % name)

  shape = tensor_shape.as_shape(shape)
  if initializing_from_value:
    # Derive/validate the shape against the initializing tensor.
    shape = shape.merge_with(initializer.get_shape())

  partitions = None
  if not reuse or partitioner:
    partitions = _call_partitioner(partitioner, shape, dtype)

  if name in self._partitioned_vars:
    # Reuse path: validate shape, dtype and partitioning against the stored
    # PartitionedVariable before returning it.
    if reuse is False:
      raise ValueError(
          "Partitioned variable with name %s already exists. Did you mean to "
          "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)

    existing_var = self._partitioned_vars[name]
    if not shape.is_compatible_with(existing_var.get_shape()):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but specified shape %s "
          "and found shape %s." % (name, shape, existing_var.get_shape()))
    if not dtype.is_compatible_with(existing_var.dtype):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but specified dtype %s "
          "and found dtype %s." % (name, dtype.name, existing_var.dtype.name))

    # pylint: disable=protected-access
    if (partitions is not None and
        existing_var._get_partitions() != partitions):
      raise ValueError(
          "Trying to reuse partitioned variable %s, but specified partitions "
          "%s and found partitions %s." %
          (name, partitions, existing_var._get_partitions()))
    # pylint: enable=protected-access

    return existing_var

  if reuse is True:
    raise ValueError("PartitionedVariable %s does not exist, or was not "
                     "created with tf.get_variable(). Did you mean to set "
                     "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)

  slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)

  # Detect a pre-existing, differently-sharded set of "<name>/part_i" vars.
  if "%s/part_0" % name in self._vars:
    if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
      raise ValueError(
          "Partitioner returned a different partitioning than what was "
          "already found. Partitioner returned %d shards, and shard "
          "%s/part_0 was found, but %s/part_%d was not." %
          (num_slices, name, name, num_slices - 1))
    if "%s/part_%d" % (name, num_slices) in self._vars:
      raise ValueError(
          "Partitioner returned a different partitioning than what was "
          "already found. Partitioner returned %d shards, and shard "
          "%s/part_0 was found, but so was the extra shard %s/part_%d." %
          (num_slices, name, name, num_slices))

  # Create one shard per slice, each via _get_single_variable.
  vs = []
  for i, (var_offset, var_shape) in enumerate(
      _iter_slices(shape.as_list(), num_slices, slice_dim)):
    partition_info = _PartitionInfo(
        full_shape=shape.as_list(), var_offset=var_offset)
    var_full_name = "%s/part_%d" % (name, i)
    with ops.name_scope(
        var_full_name + "/PartitionedInitializer", skip_on_eager=False):
      # Create the tensor to initialize the variable with default value.
      if initializer is None:
        init, initializing_from_value = self._get_default_initializer(
            name=name, shape=shape, dtype=dtype)
        if initializing_from_value:
          init_shape = None
        else:
          init_shape = var_shape
      elif callable(initializer):
        # Callable initializers are invoked per shard with the shard's shape.
        init = initializer
        init_shape = var_shape
      elif isinstance(initializer, ops.Tensor):
        # Tensor initializers cover the full variable; slice out this shard.
        init = array_ops.slice(initializer, var_offset, var_shape)
        # Use the dtype of the given tensor.
        dtype = init.dtype.base_dtype
        init_shape = None
      else:
        init = ops.convert_to_tensor(initializer, dtype=dtype)
        init = array_ops.slice(init, var_offset, var_shape)
        init_shape = None

    with ops.name_scope(None):
      var = self._get_single_variable(
          name=var_full_name,
          shape=init_shape,
          dtype=dtype,
          initializer=init,
          partition_info=partition_info,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

    # pylint: disable=protected-access
    # Record where this shard lives inside the full variable for savers.
    var._set_save_slice_info(
        variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
                                         var_shape))
    vs.append(var)
    # pylint: enable=protected-access

  partitioned_var = variables.PartitionedVariable(
      name=name,
      shape=shape,
      dtype=dtype,
      variable_list=vs,
      partitions=partitions)
  if not context.executing_eagerly() or self._store_eager_variables:
    self._partitioned_vars[name] = partitioned_var
  return partitioned_var
  def _get_single_variable(self,
                           name,
                           shape=None,
                           dtype=dtypes.float32,
                           initializer=None,
                           regularizer=None,
                           partition_info=None,
                           reuse=None,
                           trainable=None,
                           collections=None,
                           caching_device=None,
                           validate_shape=True,
                           use_resource=None,
                           constraint=None,
                           synchronization=VariableSynchronization.AUTO,
                           aggregation=VariableAggregation.NONE):
    """Get or create a single Variable (e.g.
    a shard or entire variable).
    See the documentation of get_variable above (ignore partitioning components)
    for details.
    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.
      initializer: see get_variable.
      regularizer: see get_variable.
      partition_info: _PartitionInfo object.
      reuse: see get_variable.
      trainable: see get_variable.
      collections: see get_variable.
      caching_device: see get_variable.
      validate_shape: see get_variable.
      use_resource: see get_variable.
      constraint: see get_variable.
      synchronization: see get_variable.
      aggregation: see get_variable.
    Returns:
      A Variable. See documentation of get_variable above.
    Raises:
      ValueError: See documentation of get_variable above.
    """
    # Set to true if initializer is a constant.
    initializing_from_value = False
    if initializer is not None and not callable(initializer):
      initializing_from_value = True
    # A constant initializer already carries its own shape, so an explicit
    # shape argument is redundant and therefore rejected.
    if shape is not None and initializing_from_value:
      raise ValueError("If initializer is a constant, do not specify shape.")
    dtype = dtypes.as_dtype(dtype)
    shape = tensor_shape.as_shape(shape)
    if name in self._vars:
      # Here we handle the case when returning an existing variable.
      # An explicit reuse=False (as opposed to None or AUTO_REUSE) means the
      # caller insists on creating a fresh variable, so an existing one with
      # the same name is an error.
      if reuse is False:
        var = self._vars[name]
        err_msg = ("Variable %s already exists, disallowed."
                   " Did you mean to set reuse=True or "
                   "reuse=tf.AUTO_REUSE in VarScope?" % name)
        # ResourceVariables don't have an op associated with so no traceback
        if isinstance(var, resource_variable_ops.ResourceVariable):
          raise ValueError(err_msg)
        tb = var.op.traceback[::-1]
        # Throw away internal tf entries and only take a few lines. In some
        # cases the traceback can be longer (e.g. if someone uses factory
        # functions to create variables) so we take more than needed in the
        # default case.
        tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
        raise ValueError("%s Originally defined at:\n\n%s" %
                         (err_msg, "".join(traceback.format_list(tb))))
      found_var = self._vars[name]
      # Reuse is allowed: validate that the requested shape/dtype are
      # compatible with the stored variable before handing it back.
      if not shape.is_compatible_with(found_var.get_shape()):
        raise ValueError("Trying to share variable %s, but specified shape %s"
                         " and found shape %s." %
                         (name, shape, found_var.get_shape()))
      if not dtype.is_compatible_with(found_var.dtype):
        dtype_str = dtype.name
        found_type_str = found_var.dtype.name
        raise ValueError("Trying to share variable %s, but specified dtype %s"
                         " and found dtype %s." %
                         (name, dtype_str, found_type_str))
      return found_var
    # The code below handles only the case of creating a new variable.
    if reuse is True:
      raise ValueError("Variable %s does not exist, or was not created with "
                       "tf.get_variable(). Did you mean to set "
                       "reuse=tf.AUTO_REUSE in VarScope?" % name)
    # Create the tensor to initialize the variable with default value.
    if initializer is None:
      initializer, initializing_from_value = self._get_default_initializer(
          name=name, shape=shape, dtype=dtype)
    # Enter an init scope when creating the initializer.
    with ops.init_scope():
      if initializing_from_value:
        init_val = initializer
        variable_dtype = None
      else:
        # Instantiate initializer if provided initializer is a type object.
        if tf_inspect.isclass(initializer):
          initializer = initializer()
        if shape.is_fully_defined():
          # Only pass partition_info through when the initializer's
          # signature actually accepts it.
          if "partition_info" in tf_inspect.getargspec(initializer).args:
            init_val = functools.partial(initializer,
                                         shape.as_list(),
                                         dtype=dtype,
                                         partition_info=partition_info)
          else:
            init_val = functools.partial(initializer,
                                         shape.as_list(), dtype=dtype)
          variable_dtype = dtype.base_dtype
        elif _needs_no_arguments(initializer):
          # A zero-argument callable can be used as-is even when the shape is
          # not fully defined; the resulting value determines the shape.
          init_val = initializer
          variable_dtype = None
        else:
          raise ValueError("The initializer passed is not valid. It should "
                           "be a callable with no arguments and the "
                           "shape should not be provided or an instance of "
                           "`tf.keras.initializers.*' and `shape` should be "
                           "fully defined.")
    # Create the variable.
    if use_resource is None:
      # Set the default value if unspecified.
      use_resource = _DEFAULT_USE_RESOURCE
    v = variables.VariableV1(
        initial_value=init_val,
        name=name,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        dtype=variable_dtype,
        validate_shape=validate_shape,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)
    # When storing eager variables, mirror the graph-mode collection
    # bookkeeping that VariableV1 would otherwise only do in graph mode.
    if context.executing_eagerly() and self._store_eager_variables:
      if collections:
        ops.add_to_collections(collections, v)
      else:
        ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
      if trainable:
        ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
    if not context.executing_eagerly() or self._store_eager_variables:
      # In eager mode we do not want to keep default references to Variable
      # objects as this will prevent their memory from being released.
      self._vars[name] = v
    logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
                 format(shape), initializer)
    # Run the regularizer if requested and save the resulting loss.
    if regularizer:
      def make_regularizer_op():
        with ops.colocate_with(v):
          with ops.name_scope(name + "/Regularizer/"):
            return regularizer(v)
      # Probe the regularizer once; only when it actually produces a loss do
      # we register a lazily re-evaluated tensor in the losses collection.
      if regularizer(v) is not None:
        lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op)
        ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
                              lazy_eval_tensor)
    return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
class _LazyEvalTensor(core.Tensor):
  """A Tensor-like object whose value is recomputed each time it is used."""

  def __init__(self, thunk):
    """Initializes a _LazyEvalTensor object.

    Args:
      thunk: A callable. A thunk which computes the value of the tensor.
    """
    self._thunk = thunk
    # Evaluate once up front; this "master" tensor supplies static metadata
    # (dtype, shape, ...) without re-running the thunk.
    self._master_tensor = thunk()

  def _as_tensor(self, dtype=None, name=None, as_ref=False):
    del name  # Unused.
    assert not as_ref
    assert dtype in (None, self.dtype)
    # Re-run the thunk so the caller always sees a fresh value.
    return self._thunk()
def _make_master_property(name):
@property
def prop(self):
return getattr(self._master_tensor, name) # pylint: disable=protected-access
return prop
# Metadata properties forwarded verbatim from the eagerly-evaluated master
# tensor; reading them does not re-run the thunk.
_master_property_list = ("device", "dtype", "graph", "name", "op", "shape",
                         "value_index")
for _name in _master_property_list:
  setattr(_LazyEvalTensor, _name, _make_master_property(_name))
def _make_master_method(name):
def method(self, *args, **kwargs):
return getattr(self._master_tensor, name)(*args, **kwargs) # pylint: disable=protected-access
return method
# Methods answered from the cached master tensor (metadata-only queries).
_master_method_list = ("get_shape", "__str__", "shape_as_list")
for _name in _master_method_list:
  setattr(_LazyEvalTensor, _name, _make_master_method(_name))
def _make_op_method(name):
def method(self, *args, **kwargs):
return getattr(self._as_tensor(), name)(*args, **kwargs) # pylint: disable=protected-access
return method
# Operators and value-producing methods; each one triggers a fresh evaluation
# of the thunk via _as_tensor rather than reusing the cached master tensor.
_op_list = ("__abs__", "__add__", "__and__", "__bool__", "__div__", "__eq__",
            "__floordiv__", "__ge__", "__getitem__", "__gt__", "__invert__",
            "__iter__", "__le__", "__len__", "__lt__", "__matmul__", "__mod__",
            "__mul__", "__ne__", "__neg__", "__nonzero__", "__or__", "__pow__",
            "__radd__", "__rand__", "__rdiv__", "__rfloordiv__", "__rmatmul__",
            "__rmod__", "__rmul__", "__ror__", "__rpow__", "__rsub__",
            "__rtruediv__", "__rxor__", "__sub__", "__truediv__", "__xor__",
            "eval", "numpy")
for _name in _op_list:
  setattr(_LazyEvalTensor, _name, _make_op_method(_name))
# Let graph building convert a _LazyEvalTensor to a real Tensor by invoking
# its thunk.
ops.register_tensor_conversion_function(
    _LazyEvalTensor,
    lambda val, dtype, name, as_ref: val._as_tensor(dtype, name, as_ref)  # pylint: disable=protected-access
)
# For Session.run, fetch the cached master tensor and return it unchanged.
session.register_session_run_conversion_functions(
    _LazyEvalTensor,
    lambda fetch: ([fetch._master_tensor], lambda fetched_vals: fetched_vals[0])  # pylint: disable=protected-access
)
# Passing this as a regularizer disables regularization for a variable.
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
  """A regularizer that always produces no loss, preventing regularization."""
  return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope:
  """Variable scope object to carry defaults to provide to `get_variable`.
  Many of the arguments we need for `get_variable` in a variable store are most
  easily handled with a context. This object is used for the defaults.
  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be False.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults to
      False (will later change to True). When eager execution is enabled this
      argument is always forced to be True.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
  """

  def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    # Eager execution overrides the caller's choices: reuse is forced to
    # AUTO_REUSE, resource variables are mandatory, and caching devices are
    # unsupported.
    if context.executing_eagerly():
      if self._caching_device is not None:
        raise NotImplementedError("Caching devices is not yet supported "
                                  "when eager execution is enabled.")
      self._reuse = AUTO_REUSE
      self._use_resource = True

  # Read-only accessors for the scope's stored defaults.
  @property
  def name(self):
    return self._name

  @property
  def original_name_scope(self):
    return self._name_scope

  @property
  def reuse(self):
    return self._reuse

  @property
  def initializer(self):
    return self._initializer

  @property
  def dtype(self):
    return self._dtype

  @property
  def use_resource(self):
    return self._use_resource

  @property
  def regularizer(self):
    return self._regularizer

  @property
  def caching_device(self):
    return self._caching_device

  @property
  def partitioner(self):
    return self._partitioner

  @property
  def custom_getter(self):
    return self._custom_getter

  @property
  def constraint(self):
    return self._constraint

  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True

  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer

  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype

  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    if context.executing_eagerly() and not use_resource:
      raise ValueError("When eager execution is enabled, "
                       "use_resource cannot be set to false.")
    self._use_resource = use_resource

  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer

  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    if context.executing_eagerly():
      raise NotImplementedError("Caching devices are not yet supported "
                                "when eager execution is enabled.")
    self._caching_device = caching_device

  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    self._partitioner = partitioner

  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter

  def get_collection(self, name):
    """Get this scope's variables."""
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)

  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)

  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)

  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)

  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fill unspecified arguments from the scope's stored defaults.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if context.executing_eagerly():
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource
    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None, skip_on_eager=False):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          custom_getter=custom_getter,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=None,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None,
                                synchronization=VariableSynchronization.AUTO,
                                aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fill unspecified arguments from the scope's stored defaults.
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource
    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)
    if partitioner is None:
      raise ValueError("No partitioner was specified")
    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None, skip_on_eager=False):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=self.reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)
      # pylint: enable=protected-access
# Graph-collection keys under which the default variable store and the
# per-thread variable scope store are stashed.
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  existing = ops.get_collection(_VARSCOPESTORE_KEY)
  if existing:
    return existing[0]
  # First use on this graph: create a store and remember it.
  scope_store = _VariableScopeStore()
  ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
  return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
  """Returns the current variable scope.
  @compatibility(TF2)
  Although it is a legacy `compat.v1` api,
  `tf.compat.v1.get_variable` is compatible with eager
  execution and `tf.function`
  However, to maintain variable-scope based variable reuse
  you will need to combine it with
  `tf.compat.v1.keras.utils.track_tf1_style_variables`. (Though
  it will behave as if reuse is always set to `tf.compat.v1.AUTO_REUSE`.)
  See the
  [migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
  for more info.
  The TF2 equivalent, if you are just trying to track
  variable name prefixes and not control `get_variable`-based variable reuse,
  would be to use `tf.name_scope` and capture the output of opening the
  scope (which represents the current name prefix).
  For example:
  ```python
  with tf.name_scope('foo') as current_scope:
    ...
  ```
  @end_compatibility
  """
  return get_variable_scope_store().current_scope
def _get_default_variable_store():
  """Returns the graph's variable store, creating one on first use."""
  existing = ops.get_collection(_VARSTORE_KEY)
  if existing:
    return existing[0]
  store = _VariableStore()
  ops.add_to_collection(_VARSTORE_KEY, store)
  return store
@tf_contextlib.contextmanager
def with_variable_store(store):
  """Context manager that temporarily installs `store` as the variable store."""
  collection = ops.get_collection_ref(_VARSTORE_KEY)
  previous = list(collection)
  collection[:] = [store]
  try:
    yield
  finally:
    # Restore whatever stores were registered before entering.
    collection[:] = previous
class EagerVariableStore:
  """Wrapper allowing functional layers to be used with eager execution.
  When eager execution is enabled Variables get deleted when they go out of
  scope, and are not stored in global collections by default. A lot of code
  (mostly the functional layers in tf.layers) assumes that variables are kept in
  a global list.
  EagerVariableStore can be used in conjunction with this code to make it
  eager-friendly. For example, to create a dense layer, use:
  ```
  container = tfe.EagerVariableStore()
  for input in dataset_iterator:
    with container.as_default():
      x = tf.compat.v1.layers.dense(input, name="l1")
  print(container.variables)  # Should print the variables used in the layer.
  ```
  """

  def __init__(self, store=None):
    # Either adopt an eager-capable store or build a fresh one.
    if store is None:
      store = _VariableStore()
      store._store_eager_variables = True  # pylint: disable=protected-access
    elif not store._store_eager_variables:  # pylint: disable=protected-access
      raise ValueError("Cannot construct EagerVariableStore from a "
                       "VariableStore object that does not hold eager "
                       "variables.")
    self._store = store

  def as_default(self):
    """Returns a context manager making this the default variable store."""
    return with_variable_store(self._store)

  def variables(self):
    """Returns all variables held by this store, sorted by name."""
    return sorted(self._store._vars.values(), key=lambda v: v.name)  # pylint: disable=protected-access

  def trainable_variables(self):
    """Returns the trainable variables held by this store, sorted by name."""
    # pylint: disable=protected-access
    return sorted((v for v in self._store._vars.values() if v.trainable),
                  key=lambda v: v.name)
    # pylint: enable=protected-access

  def non_trainable_variables(self):
    """Returns the non-trainable variables held by this store, sorted by name."""
    # pylint: disable=protected-access
    return sorted((v for v in self._store._vars.values() if not v.trainable),
                  key=lambda v: v.name)
    # pylint: enable=protected-access

  def copy(self):
    """Copy this variable store and all of its contents.
    Variables contained in this store will be copied over to the new variable
    store, meaning that they can be modified without affecting the variables in
    this store.
    Returns:
      A new EagerVariableStore instance containing copied variables.
    """
    # pylint: disable=protected-access
    new_store = EagerVariableStore()
    for key, var in self._store._vars.items():
      # Strip the ":<output-index>" device suffix from the variable name, if
      # present; names without a colon are kept unchanged.
      stripped_var_name = var.name.partition(":")[0]
      # Create new variable with same value, name, and "trainable" flag.
      new_var = resource_variable_ops.ResourceVariable(
          var.read_value(), name=stripped_var_name, trainable=var.trainable)
      new_store._store._vars[key] = new_var
    return new_store
    # pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
  # Delegate to the current scope, which resolves defaults and name prefixes,
  # backed by the graph's default variable store. (The docstring is attached
  # below from the shared template.)
  scope = get_variable_scope()
  store = _get_default_variable_store()
  return scope.get_variable(
      store,
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      custom_getter=custom_getter,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
@compatibility(TF2)
Although it is a legacy `compat.v1` api,
`tf.compat.v1.get_variable` is mostly compatible with eager
execution and `tf.function` but only if you combine it with the
`tf.compat.v1.keras.utils.track_tf1_style_variables` decorator. (Though
it will behave as if reuse is always set to `AUTO_REUSE`.)
See the
[model migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
for more info.
If you do not combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`, `get_variable` will create
a brand new variable every single time it is called and will never reuse
variables, regardless of variable names or `reuse` arguments.
The TF2 equivalent of this symbol would be `tf.Variable`, but note
that when using `tf.Variable` you must make sure you track your variables
(and regularizer arguments) either manually or via `tf.Module` or
`tf.keras.layers.Layer` mechanisms.
A section of the
[migration guide](https://www.tensorflow.org/guide/migrate/model_mapping#incremental_migration_to_native_tf2)
provides more details on incrementally migrating these usages to `tf.Variable`
as well.
Note: The `partitioner` arg is not compatible with TF2 behaviors even when
using `tf.compat.v1.keras.utils.track_tf1_style_variables`. It can be replaced
by using `ParameterServerStrategy` and its partitioners. See the
[multi-gpu migration guide](https://www.tensorflow.org/guide/migrate/multi_worker_cpu_gpu_training)
and the ParameterServerStrategy guides it references for more info.
@end_compatibility
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable(  # pylint: disable=missing-docstring
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=False,  # pylint: disable=unused-argument
    collections=None,
    caching_device=None,
    partitioner=None,
    validate_shape=True,
    use_resource=None,
    custom_getter=None,
    constraint=None,
    synchronization=VariableSynchronization.AUTO,
    aggregation=VariableAggregation.NONE):
  # Append LOCAL_VARIABLES to a *copy* of the caller's collections. The
  # previous `collections += [...]` mutated the caller's list in place as a
  # side effect; building a new list avoids that and also accepts tuples.
  if collections:
    collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  # Local variables are never trainable, regardless of the `trainable` arg.
  return get_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=False,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      synchronization=synchronization,
      aggregation=aggregation,
      custom_getter=custom_getter,
      constraint=constraint)
# Fill the shared template for get_local_variable: notes the LOCAL_VARIABLES /
# trainable=False behavior, omits the trainable-arg doc, and defaults to the
# LOCAL_VARIABLES collection.
get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
                              shape=None,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              trainable=True,
                              collections=None,
                              caching_device=None,
                              partitioner=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None,
                              synchronization=VariableSynchronization.AUTO,
                              aggregation=VariableAggregation.NONE):
  """Gets or creates a sharded variable list with these parameters.
  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension. That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1. Currently, sharding along
  only one axis is supported.
  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.
  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.
  If the initializer is a callable, then it will be called for each
  shard. Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.
  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.
  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to. Defaults
      to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's device.
      If not `None`, caches on another device. Typical use is to cache on the
      device where the Ops using the Variable reside, to deduplicate copying
      through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a value
      of unknown shape. If True, the default, the shape of initial_value must be
      known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
    synchronization: Indicates when a distributed a variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.
  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.
  Raises:
    ValueError: when creating a new variable and shape is not declared,
    or when violating reuse during variable creation. Reuse is set inside
    `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  # This private API goes straight to the scope's partitioning logic, so a
  # custom getter installed on the scope would be silently bypassed; reject
  # the combination outright.
  if scope.custom_getter is not None:
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set. Current custom getter: %s. "
        "It is likely that you're using create_partitioned_variables. "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  # Delegate to the current scope, which owns reuse/partitioning semantics,
  # using the default (shared) variable store.
  return scope._get_partitioned_variable(
      _get_default_variable_store(),
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
  # pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope:  # pylint: disable=invalid-name
  """A context for the variable_scope, see `variable_scope` for docs."""
  def __init__(self,
               name_or_scope,
               reuse=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               old_name_scope=None,
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a context for the variable_scope, see `variable_scope` for docs.
    Note: this does not create a name scope.
    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
        the parent scope's reuse flag.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      old_name_scope: the original name scope when re-entering a variable scope.
      dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
      use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be creates instead, with
        well-defined semantics. Defaults to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
    """
    self._name_or_scope = name_or_scope
    self._reuse = reuse
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._old_name_scope = old_name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    self._var_store = _get_default_variable_store()
    self._var_scope_store = get_variable_scope_store()
    # Set by __enter__; __exit__ compares against it to detect improper
    # nesting of scopes.
    self._last_variable_scope_object = None
    if isinstance(self._name_or_scope, VariableScope):
      self._new_name = self._name_or_scope.name
      name_scope = self._name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope. We create a new
      # VariableScope (self._var_scope_object) that contains a copy of the
      # provided shared scope, possibly with changed reuse and initializer, if
      # the user requested this.
      variable_scope_object = VariableScope(
          self._name_or_scope.reuse if not self._reuse else self._reuse,
          name=self._new_name,
          initializer=self._name_or_scope.initializer,
          regularizer=self._name_or_scope.regularizer,
          caching_device=self._name_or_scope.caching_device,
          partitioner=self._name_or_scope.partitioner,
          dtype=self._name_or_scope.dtype,
          custom_getter=self._name_or_scope.custom_getter,
          name_scope=name_scope,
          use_resource=self._name_or_scope.use_resource,
          constraint=self._constraint)
      # Explicit per-field overrides win over the values copied from the
      # shared scope above.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        # Chain the new custom getter with the shared scope's existing one.
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._name_or_scope.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._cached_variable_scope_object = variable_scope_object
  def __enter__(self):
    """Begins the scope block.
    Returns:
      A VariableScope.
    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope, or if reuse is not `None` or `True`.
      TypeError: when the types of some arguments are not appropriate.
    """
    # Remember the scope we entered from so __exit__ can restore it.
    self._old = self._var_scope_store.current_scope
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.open_variable_scope(self._new_name)
      self._old_subscopes = copy.copy(
          self._var_scope_store.variable_scopes_count)
      variable_scope_object = self._cached_variable_scope_object
    else:
      # Handler for the case when we just prolong current variable scope.
      # VariableScope with name extended by the provided one, and inherited
      # reuse and initializer (except if the user provided values to set).
      self._new_name = (
          self._old.name + "/" +
          self._name_or_scope if self._old.name else self._name_or_scope)
      self._reuse = (self._reuse or
                     self._old.reuse)  # Re-using is inherited by sub-scopes.
      if self._old_name_scope is None:
        name_scope = self._name_or_scope
      else:
        name_scope = self._old_name_scope
      variable_scope_object = VariableScope(
          self._reuse,
          name=self._new_name,
          initializer=self._old.initializer,
          regularizer=self._old.regularizer,
          caching_device=self._old.caching_device,
          partitioner=self._old.partitioner,
          dtype=self._old.dtype,
          use_resource=self._old.use_resource,
          custom_getter=self._old.custom_getter,
          name_scope=name_scope,
          constraint=self._constraint)
      # Explicit per-field overrides win over the inherited parent values.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        # Chain the new custom getter with the parent scope's existing one.
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._old.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._var_scope_store.open_variable_scope(self._new_name)
    self._var_scope_store.current_scope = variable_scope_object
    self._last_variable_scope_object = variable_scope_object
    return variable_scope_object
  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Detect mismatched enter/exit pairs: the scope being closed must be the
    # one this object installed in __enter__.
    if (self._var_scope_store.current_scope is
        not self._last_variable_scope_object):
      raise RuntimeError("Improper nesting of variable_scope.")
    # If jumping out from a non-prolonged scope, restore counts.
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.variable_scopes_count = self._old_subscopes
    else:
      self._var_scope_store.close_variable_subscopes(self._new_name)
    self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  store = get_variable_scope_store()
  parent = get_variable_scope()
  # Uniqueness is checked against the fully qualified name, but only the
  # (possibly suffixed) prefix is returned.
  full_name = parent.name + "/" + prefix if parent.name else prefix
  if store.variable_scope_count(full_name) == 0:
    return prefix
  # Probe prefix_1, prefix_2, ... until an unused name is found.
  suffix = 1
  while store.variable_scope_count("%s_%d" % (full_name, suffix)) > 0:
    suffix += 1
  return "%s_%d" % (prefix, suffix)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"])  # pylint: disable=invalid-name
class variable_scope:
  """A context manager for defining ops that creates variables (layers).
  @compatibility(TF2)
  Although it is a legacy `compat.v1` api,
  `tf.compat.v1.variable_scope` is mostly compatible with eager
  execution and `tf.function` as long as you combine it with the
  `tf.compat.v1.keras.utils.track_tf1_style_variables` decorator (though
  it will behave as if reuse is always set to `AUTO_REUSE`.)
  See the
  [model migration guide](www.tensorflow.org/guide/migrate/model_mapping)
  for more info on
  migrating code that relies on `variable_scope`-based variable reuse.
  When you use it with eager execution enabled but without
  `tf.compat.v1.keras.utils.track_tf1_style_variables`,
  `tf.compat.v1.variable_scope` will still be able to prefix the names
  of variables created within the scope but it will not enable variable reuse
  or error-raising checks around variable reuse (`get_variable` calls within
  it would always create new variables).
  Once you have switched away from `get_variable`-based variable reuse
  mechanisms, to switch to TF2 APIs you can just use
  `tf.name_scope` to prefix variable names.
  @end_compatibility
  This context manager validates that the (optional) `values` are from the same
  graph, ensures that graph is the default graph, and pushes a name scope and a
  variable scope.
  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
  then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N`
  to it.
  Variable scope allows you to create new variables and to share already created
  ones while providing checks to not create or share by accident. For details,
  see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
  we present only a few basic examples.
  The Variable Scope works as expected when the Eager Execution is Disabled.
  ```python
  tf.compat.v1.disable_eager_execution()
  ```
  Simple example of how to create a new variable:
  ```python
  with tf.compat.v1.variable_scope("foo"):
    with tf.compat.v1.variable_scope("bar"):
      v = tf.compat.v1.get_variable("v", [1])
      assert v.name == "foo/bar/v:0"
  ```
  Simple example of how to reenter a premade variable scope safely:
  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    pass
  # Re-enter the variable scope.
  with tf.compat.v1.variable_scope(vs,
                                   auxiliary_name_scope=False) as vs1:
    # Restore the original name_scope.
    with tf.name_scope(vs1.original_name_scope):
      v = tf.compat.v1.get_variable("v", [1])
      assert v.name == "foo/v:0"
      c = tf.constant([1], name="c")
      assert c.name == "foo/c:0"
  ```
  Keep in mind that the counters for `default_name` are discarded once the
  parent scope is exited. Therefore when the code re-enters the scope (for
  instance by saving it), all nested default_name counters will be restarted.
  For instance:
  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    with tf.compat.v1.variable_scope(None, default_name="bar"):
      v = tf.compat.v1.get_variable("a", [1])
      assert v.name == "foo/bar/a:0", v.name
    with tf.compat.v1.variable_scope(None, default_name="bar"):
      v = tf.compat.v1.get_variable("b", [1])
      assert v.name == "foo/bar_1/b:0"
  with tf.compat.v1.variable_scope(vs):
    with tf.compat.v1.variable_scope(None, default_name="bar"):
      v = tf.compat.v1.get_variable("c", [1])
      assert v.name == "foo/bar/c:0"   # Uses bar instead of bar_2!
  ```
  Basic example of sharing a variable AUTO_REUSE:
  ```python
  def foo():
    with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable("v", [1])
    return v
  v1 = foo()  # Creates v.
  v2 = foo()  # Gets the same, existing v.
  assert v1 == v2
  ```
  Basic example of sharing a variable with reuse=True:
  ```python
  with tf.compat.v1.variable_scope("foo"):
    v = tf.compat.v1.get_variable("v", [1])
  with tf.compat.v1.variable_scope("foo", reuse=True):
    v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```
  Sharing a variable by capturing a scope and setting reuse:
  ```python
  with tf.compat.v1.variable_scope("foo") as scope:
    v = tf.compat.v1.get_variable("v", [1])
    scope.reuse_variables()
    v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```
  To prevent accidental sharing of variables, we raise an exception when getting
  an existing variable in a non-reusing scope.
  ```python
  with tf.compat.v1.variable_scope("foo"):
    v = tf.compat.v1.get_variable("v", [1])
    v1 = tf.compat.v1.get_variable("v", [1])
    #  Raises ValueError("... v already exists ...").
  ```
  Similarly, we raise an exception when trying to get a variable that does not
  exist in reuse mode.
  ```python
  with tf.compat.v1.variable_scope("foo", reuse=True):
    v = tf.compat.v1.get_variable("v", [1])
    #  Raises ValueError("... v does not exists ...").
  ```
  Note that the `reuse` flag is inherited: if we open a reusing scope, then all
  its sub-scopes become reusing as well.
  A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as mult. See related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
  Note that up to and including version 1.0, it was allowed (though explicitly
  discouraged) to pass False to the reuse argument, yielding undocumented
  behaviour slightly different from None. Starting at 1.1.0 passing None and
  False as reuse has exactly the same effect.
  A note about using variable scopes in multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scopes names are also generated
  only on a per thread basis. If the same name was used within a different
  thread, that doesn't prevent a new thread from creating the same scope.
  However, the underlying variable store is shared across threads (within the
  same graph). As such, if another thread tries to create a new variable with
  the same name as a variable created by a previous thread, it will fail unless
  reuse is True.
  Further, each thread starts with an empty variable scope. So if you wish to
  preserve name prefixes from a scope from the main thread, you should capture
  the main thread's scope and re-enter it in each thread. For e.g.
  ```
  main_thread_scope = variable_scope.get_variable_scope()
  # Thread's target function:
  def thread_target_fn(captured_scope):
    with variable_scope.variable_scope(captured_scope):
      # .... regular code for this thread
  thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
  ```
  """
  def __init__(self,
               name_or_scope,
               default_name=None,
               values=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               reuse=None,
               dtype=None,
               use_resource=None,
               constraint=None,
               auxiliary_name_scope=True):
    """Initialize the context manager.
    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument is
        `None`, this name will be uniquified. If name_or_scope is provided it
        won't be used and therefore it is not required and can be None.
      values: The list of `Tensor` arguments that are passed to the op function.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
        reuse mode for this scope as well as all sub-scopes; if
        tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
        return them otherwise; if None, we inherit the parent scope's reuse
        flag. When eager execution is enabled, new variables are always created
        unless an EagerVariableStore or template is currently active.
      dtype: type of variables created in this scope (defaults to the type in
        the passed scope, or inherited from parent scope).
      use_resource: If False, all variables will be regular Variables. If True,
        experimental ResourceVariables with well-defined semantics will be used
        instead. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be True.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is not
        inherited, and it only takes effect for once when creating. You should
        only use it for re-entering a premade variable scope.
    Returns:
      A scope that can be captured and reused.
    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._name_or_scope = name_or_scope
    self._default_name = default_name
    self._values = values
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._reuse = reuse
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    # Validate argument combinations eagerly so errors surface at
    # construction time rather than at __enter__.
    if self._default_name is None and self._name_or_scope is None:
      raise TypeError("If default_name is None then name_or_scope is required")
    if self._reuse is False:
      # We don't allow non-inheriting scopes, False = None here.
      self._reuse = None
    if not (self._reuse is True
            or self._reuse is None
            or self._reuse is AUTO_REUSE):
      raise ValueError("The reuse parameter must be True or False or None.")
    if self._values is None:
      self._values = []
    self._in_graph_mode = not context.executing_eagerly()
    if self._in_graph_mode:
      # Capture the graph the `values` belong to so __enter__ can make it the
      # default graph.
      self._graph = ops._get_graph_from_inputs(self._values)  # pylint: disable=protected-access
    self._cached_pure_variable_scope = None
    self._current_name_scope = None
    if not isinstance(auxiliary_name_scope, bool):
      raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
                      "while get {}".format(auxiliary_name_scope))
    self._auxiliary_name_scope = auxiliary_name_scope
  def __enter__(self):
    # If the default graph is building a function, then we should not replace it
    # with the cached graph.
    if ops.get_default_graph().building_function:
      self._building_function = True
    else:
      self._building_function = False
    # Enter the captured graph first so the name/variable scopes below are
    # created in it.
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager = self._graph.as_default()
      self._graph_context_manager.__enter__()
    if self._cached_pure_variable_scope is not None:
      # Fast path for re-entering variable_scopes. We've held on to the pure
      # variable scope from a previous successful __enter__, so we avoid some
      # overhead by re-using that object.
      if self._current_name_scope is not None:
        self._current_name_scope.__enter__()
      return self._cached_pure_variable_scope.__enter__()
    try:
      return self._enter_scope_uncached()
    except:
      # Undo the graph entry above so a failed __enter__ leaves no state.
      if (self._in_graph_mode and not self._building_function and
          self._graph_context_manager is not None):
        self._graph_context_manager.__exit__(*sys.exc_info())
      raise
  def _enter_scope_uncached(self):
    """Enters the context manager when there is no cached scope yet.
    Returns:
      The entered variable scope.
    Raises:
      TypeError: A wrong type is passed as `scope` at __init__().
      ValueError: `reuse` is incorrectly set at __init__().
    """
    if self._auxiliary_name_scope:
      # Create a new name scope later
      current_name_scope = None
    else:
      # Reenter the current name scope
      name_scope = ops.get_name_scope()
      if name_scope:
        # Hack to reenter
        name_scope += "/"
        current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
      else:
        # Root scope
        current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
    # IMPORTANT: Only assign to self._cached_pure_variable_scope and
    # self._current_name_scope after successful __enter__() calls.
    if self._name_or_scope is not None:
      if not isinstance(self._name_or_scope, (VariableScope, str)):
        raise TypeError("VariableScope: name_or_scope must be a string or "
                        "VariableScope.")
      if isinstance(self._name_or_scope, str):
        name_scope = self._name_or_scope
      else:
        name_scope = self._name_or_scope.name.split("/")[-1]
      if name_scope or current_name_scope:
        current_name_scope = current_name_scope or ops.name_scope(
            name_scope, skip_on_eager=False)
        try:
          current_name_scope_name = current_name_scope.__enter__()
        except:
          current_name_scope.__exit__(*sys.exc_info())
          raise
        self._current_name_scope = current_name_scope
        if isinstance(self._name_or_scope, str):
          old_name_scope = current_name_scope_name
        else:
          old_name_scope = self._name_or_scope.original_name_scope
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            old_name_scope=old_name_scope,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
      else:
        self._current_name_scope = None
        # This can only happen if someone is entering the root variable scope.
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
    else:  # Here name_or_scope is None. Using default name, but made unique.
      if self._reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      current_name_scope = current_name_scope or ops.name_scope(
          self._default_name, skip_on_eager=False)
      try:
        current_name_scope_name = current_name_scope.__enter__()
      except:
        current_name_scope.__exit__(*sys.exc_info())
        raise
      self._current_name_scope = current_name_scope
      unique_default_name = _get_unique_variable_scope(self._default_name)
      pure_variable_scope = _pure_variable_scope(
          unique_default_name,
          initializer=self._initializer,
          regularizer=self._regularizer,
          caching_device=self._caching_device,
          partitioner=self._partitioner,
          custom_getter=self._custom_getter,
          old_name_scope=current_name_scope_name,
          dtype=self._dtype,
          use_resource=self._use_resource,
          constraint=self._constraint)
      try:
        entered_pure_variable_scope = pure_variable_scope.__enter__()
      except:
        pure_variable_scope.__exit__(*sys.exc_info())
        raise
      self._cached_pure_variable_scope = pure_variable_scope
      return entered_pure_variable_scope
  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Unwind in reverse order of __enter__: variable scope, then name scope,
    # then the graph context. Nested try/finally guarantees each stage exits
    # even if an earlier one raises.
    try:
      self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
                                                traceback_arg)
    finally:
      try:
        if self._current_name_scope:
          self._current_name_scope.__exit__(type_arg, value_arg,
                                            traceback_arg)
      finally:
        if self._in_graph_mode and not self._building_function:
          self._graph_context_manager.__exit__(type_arg, value_arg,
                                               traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None,
                      constraint=None):
  """Deprecated: context manager for defining an op that creates variables."""
  # Warn on every call: this API was folded into `variable_scope` with a
  # different argument order (values moved behind name/default_name).
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  # Forward all arguments to `variable_scope`, which implements the behavior.
  with variable_scope(
      name_or_scope,
      default_name=default_name,
      values=values,
      initializer=initializer,
      regularizer=regularizer,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=custom_getter,
      reuse=reuse,
      dtype=dtype,
      use_resource=use_resource,
      constraint=constraint) as scope:
    yield scope
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a list of
partitions.
shape: shape of the `Tensor` to partition, must have at least two
dimensions.
dtype: dtype of the elements in the `Tensor`.
Returns:
A list with elements >=1 and exactly one >1. The index of that
element corresponds to the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_abc.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s" %
slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError("Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in range(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
def default_variable_creator(next_creator=None, **kwargs):
  """Default variable creator (TF1 semantics).

  Dispatches to `resource_variable_ops.ResourceVariable` or
  `variables.RefVariable` depending on the resolved `use_resource` flag.
  Must be the last creator in the chain (`next_creator` is None).
  """
  assert next_creator is None
  get = kwargs.get

  # Resolve whether to build a resource variable: the explicit kwarg wins,
  # then the enclosing variable scope's setting, then the module default.
  # Eager execution always forces resource variables.
  use_resource = get("use_resource", None)
  if use_resource is None:
    use_resource = get_variable_scope().use_resource
  if use_resource is None:
    use_resource = _DEFAULT_USE_RESOURCE
  if use_resource or context.executing_eagerly():
    return resource_variable_ops.ResourceVariable(
        initial_value=get("initial_value", None),
        trainable=get("trainable", None),
        collections=get("collections", None),
        validate_shape=get("validate_shape", True),
        caching_device=get("caching_device", None),
        name=get("name", None),
        dtype=get("dtype", None),
        constraint=get("constraint", None),
        variable_def=get("variable_def", None),
        import_scope=get("import_scope", None),
        distribute_strategy=get("distribute_strategy", None),
        synchronization=get("synchronization", None),
        aggregation=get("aggregation", None),
        shape=get("shape", None))
  # Legacy graph-mode reference variable; note `expected_shape` is only
  # meaningful for RefVariable.
  return variables.RefVariable(
      initial_value=get("initial_value", None),
      trainable=get("trainable", None),
      collections=get("collections", None),
      validate_shape=get("validate_shape", True),
      caching_device=get("caching_device", None),
      name=get("name", None),
      dtype=get("dtype", None),
      constraint=get("constraint", None),
      variable_def=get("variable_def", None),
      expected_shape=get("expected_shape", None),
      import_scope=get("import_scope", None),
      synchronization=get("synchronization", None),
      aggregation=get("aggregation", None),
      shape=get("shape", None))
def default_variable_creator_v2(next_creator=None, **kwargs):
  """Default variable creator (TF2): always builds a ResourceVariable.

  Must be the last creator in the chain (`next_creator` is None).
  """
  assert next_creator is None
  get = kwargs.get
  return resource_variable_ops.ResourceVariable(
      initial_value=get("initial_value", None),
      trainable=get("trainable", None),
      validate_shape=get("validate_shape", True),
      caching_device=get("caching_device", None),
      name=get("name", None),
      dtype=get("dtype", None),
      constraint=get("constraint", None),
      variable_def=get("variable_def", None),
      import_scope=get("import_scope", None),
      distribute_strategy=get("distribute_strategy", None),
      synchronization=get("synchronization", None),
      aggregation=get("aggregation", None),
      shape=get("shape", None))
# Install the default creators on the `variables` module so Variable
# construction can dispatch to them (NOTE(review): presumably assigned as
# attributes to avoid a circular import -- confirm).
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
  """Bind `captured_previous` now, working around Python's late-binding
  of loop variables in closures."""
  def _getter(**kwargs):
    return captured_getter(captured_previous, **kwargs)
  return _getter
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1  # forwarding alias kept for backwards compatibility
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
  def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwargs are:

  * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
    which is the initial value for the Variable. The initial value must have
    a shape specified unless `validate_shape` is set to False. Can also be a
    callable with no argument that returns the initial value when called. In
    that case, `dtype` must be specified. (Note that initializer functions
    from init_ops.py must first be bound to a shape before being used here.)
  * trainable: If `True`, the default, also adds the variable to the graph
    collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
    the default list of variables to use by the `Optimizer` classes.
    `trainable` defaults to `True`, unless `synchronization` is
    set to `ON_READ`, in which case it defaults to `False`.
  * collections: List of graph collections keys. The new variable is added to
    these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
  * validate_shape: If `False`, allows the variable to be initialized with a
    value of unknown shape. If `True`, the default, the shape of
    `initial_value` must be known.
  * caching_device: Optional device string describing where the Variable
    should be cached for reading. Defaults to the Variable's device.
    If not `None`, caches on another device. Typical use is to cache
    on the device where the Ops using the Variable reside, to deduplicate
    copying through `Switch` and other conditional statements.
  * name: Optional name for the variable. Defaults to `'Variable'` and gets
    uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
    If `None`, either the datatype will be kept (if `initial_value` is
    a Tensor), or `convert_to_tensor` will decide.
  * constraint: A constraint function to be applied to the variable after
    updates by some algorithms.
  * use_resource: if True, a ResourceVariable is always created.
  * synchronization: Indicates when a distributed variable will be
    aggregated. Accepted values are constants defined in the class
    `tf.VariableSynchronization`. By default the synchronization is set to
    `AUTO` and the current `DistributionStrategy` chooses
    when to synchronize.
  * aggregation: Indicates how a distributed variable will be aggregated.
    Accepted values are constants defined in the class
    `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  # Creator stacks live on the graph; the scope pops the creator on exit.
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
  def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwargs are:

  * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
    which is the initial value for the Variable. The initial value must have
    a shape specified unless `validate_shape` is set to False. Can also be a
    callable with no argument that returns the initial value when called. In
    that case, `dtype` must be specified. (Note that initializer functions
    from init_ops.py must first be bound to a shape before being used here.)
  * trainable: If `True`, the default, GradientTapes automatically watch
    uses of this Variable.
  * validate_shape: If `False`, allows the variable to be initialized with a
    value of unknown shape. If `True`, the default, the shape of
    `initial_value` must be known.
  * caching_device: Optional device string describing where the Variable
    should be cached for reading. Defaults to the Variable's device.
    If not `None`, caches on another device. Typical use is to cache
    on the device where the Ops using the Variable reside, to deduplicate
    copying through `Switch` and other conditional statements.
  * name: Optional name for the variable. Defaults to `'Variable'` and gets
    uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
    If `None`, either the datatype will be kept (if `initial_value` is
    a Tensor), or `convert_to_tensor` will decide.
  * constraint: A constraint function to be applied to the variable after
    updates by some algorithms.
  * synchronization: Indicates when a distributed variable will be
    aggregated. Accepted values are constants defined in the class
    `tf.VariableSynchronization`. By default the synchronization is set to
    `AUTO` and the current `DistributionStrategy` chooses
    when to synchronize.
  * aggregation: Indicates how a distributed variable will be aggregated.
    Accepted values are constants defined in the class
    `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  # Creator stacks live on the graph; the scope pops the creator on exit.
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield
|
main_server.py | import socket
import threading
import time
time_1 = 0
time_clean = 3
chat_cancel = False
buffer = "\n\n\n\n\n\n\n*CHAT STARTS HERE*\n\n"
host_server = input("-> Host IP: ")
def clean_buffer():
    """Background worker: once a second, trim the chat buffer.

    After `time_clean` seconds have elapsed, any buffer longer than 1024
    characters is cut back to its banner plus the tail from index 524 on.
    Stops when `chat_cancel` becomes True.
    """
    global time_1
    global time_clean
    global buffer
    global chat_cancel
    while not chat_cancel:
        time.sleep(1)
        time_1 += 1
        # Only start trimming after the grace period, and only when the
        # log has actually grown past 1 KiB.
        if time_1 >= time_clean and len(buffer) > 1024:
            buffer = "\n\n\n\n\n\n\n*CHAT STARTS HERE*\n\n..." + buffer[524:]
def can_cancel():
    """Background worker: watch stdin and shut the server down on '0'.

    Sets the global `chat_cancel` flag and sends a sentinel datagram to
    the host so the blocking recvfrom() in the main loop wakes up.
    """
    global chat_cancel
    global host_server
    while not chat_cancel:
        print("\n--If you want to close the server: Type \'0\' and Enter.--")
        choice = input("")
        if choice == '0':
            chat_cancel = True
            print("-- Closing --")
            # Sentinel message delivered to ourselves via the host address.
            server_socket.sendto("__Close__".encode(), (host_server, 1500))
# UDP server socket bound on all interfaces, port 1500.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
local = ("",1500)
server_socket.bind(local)
mesage = ""
# Background worker that periodically trims the chat buffer.
t1 = threading.Thread(target=clean_buffer)
t1.start()
# Background worker that watches stdin for the shutdown command.
t2 = threading.Thread(target=can_cancel)
t2.start()
print("\n\nOnline Server")
print("Waiting for data...\n")
# Main receive loop: an empty datagram is treated as a poll request (the
# whole buffer is sent back); anything else is appended as a chat line.
# NOTE(review): recvfrom() blocks, so `chat_cancel` is only re-checked
# after a datagram arrives -- the "__Close__" sentinel exists to wake it.
while(chat_cancel==False):
    receive,client = server_socket.recvfrom(2048)
    mesage = receive.decode()
    if(mesage==""):
        server_socket.sendto(buffer.encode(), client)
    else:
        buffer+="\n"+mesage
        print(buffer)
print("\n\n** Server Closed **")
print("\nClosing in 5 secs...")
time.sleep(5)
server_socket.close()
|
test_incoming.py | # -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import uuid
import numpy
from gnocchi import incoming
from gnocchi import indexer
from gnocchi.tests import base as tests_base
class TestIncomingDriver(tests_base.TestCase):
    """Exercises the configured incoming-measure driver."""

    def setUp(self):
        super(TestIncomingDriver, self).setUp()
        # A lot of tests wants a metric, create one
        self.metric = indexer.Metric(
            uuid.uuid4(),
            self.archive_policies["low"])

    def test_iter_on_sacks_to_process(self):
        # Skip drivers that only inherit the (unimplemented) base iterator.
        if (self.incoming.iter_on_sacks_to_process ==
                incoming.IncomingDriver.iter_on_sacks_to_process):
            self.skipTest("Incoming driver does not implement "
                          "iter_on_sacks_to_process")
        found = threading.Event()
        sack_to_find = self.incoming.sack_for_metric(self.metric.id)

        def _iter_on_sacks_to_process():
            # Consume sack notifications until the expected sack shows up.
            for sack in self.incoming.iter_on_sacks_to_process():
                self.assertIsInstance(sack, int)
                if sack == sack_to_find:
                    found.set()
                    break

        finder = threading.Thread(target=_iter_on_sacks_to_process)
        finder.daemon = True
        finder.start()

        # Try for 30s to get a notification about this sack
        for _ in range(30):
            if found.wait(timeout=1):
                break
            # NOTE(jd) Retry to send measures. It cannot be done only once as
            # there might be a race condition between the threads
            self.incoming.finish_sack_processing(sack_to_find)
            self.incoming.add_measures(self.metric, [
                incoming.Measure(numpy.datetime64("2014-01-01 12:00:01"), 69),
            ])
        else:
            self.fail("Notification for metric not received")
|
batch_generator.py | # batch gen
import random
import h5py
import numpy as np
from scipy.ndimage.interpolation import rotate, shift, affine_transform, zoom
from numpy.random import random_sample, rand, random_integers, uniform
# import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm
# from astropy.nddata.utils import block_reduce
# from staintools.LuminosityStandardizer import standardize
import staintools
import matplotlib.pyplot as plt
import scipy
from copy import deepcopy
# quite slow -> Don't use this! Not optimized and doesn't fit our problem!
def add_affine_transform2(input_im, output, max_deform):
    """Random affine deformation of a 4-D chunk and its two-channel GT.

    max_deform: magnitude of the random shear/scale perturbation.
    NOTE(review): the first matrix row is the identity, so axis 0 is left
    untouched and only the last two spatial axes are deformed -- confirm
    this matches the (z, x, y, channel) layout described above.
    """
    # Random shear terms in [-max_deform, max_deform] ...
    random_20 = uniform(-max_deform, max_deform, 2)
    # ... and random scale terms around 1.
    random_80 = uniform(1 - max_deform, 1 + max_deform, 2)
    mat = np.array([[1, 0, 0],
                    [0, random_80[0], random_20[0]],
                    [0, random_20[1], random_80[1]]]
                   )
    # Same matrix applied to the image and both ground-truth channels so
    # they stay aligned.
    input_im[:, :, :, 0] = affine_transform(input_im[:, :, :, 0], mat, output_shape=np.shape(input_im[:, :, :, 0]))
    output[:, :, :, 0] = affine_transform(output[:, :, :, 0], mat, output_shape=np.shape(input_im[:, :, :, 0]))
    output[:, :, :, 1] = affine_transform(output[:, :, :, 1], mat, output_shape=np.shape(input_im[:, :, :, 0]))
    # Re-binarize the ground truth after interpolation.
    output[output < 0.5] = 0
    output[output >= 0.5] = 1
    return input_im, output
"""
###
input_im: input image, 5d ex: (1,64,256,256,1) , (dimi0, z, x, y, channel)
output: ground truth, 5d ex: (1,64,256,256,2), (dimi0, z, x, y, channel)
max_shift: the maximum amount to shift in a direction, only shifts in x and y dir
###
"""
def add_shift2(input_im, output, max_shift):
    """Randomly translate the image and its GT by up to max_shift pixels.

    Shifts are applied in x and y only (the channel axis is never moved).
    The foreground GT channel is padded with 0, the background channel
    with 1, so padded regions stay labelled as background.
    """
    # Draw the two spatial offsets (RNG consumed in the same order as before).
    dx = round(uniform(-max_shift, max_shift))
    dy = round(uniform(-max_shift, max_shift))
    offsets = [dx, dy, 0]
    # Nearest-neighbour (order=0) shift of the (H, W, C) image.
    input_im = shift(input_im.copy(), offsets, order=0, mode='constant')
    output[..., 1] = shift(output[..., 1], offsets[:2], order=0, mode='constant')
    output[..., 0] = shift(output[..., 0], offsets[:2], order=0, mode='constant', cval=1)
    return input_im, output
"""
####
input_im: input image, 5d ex: (1,64,256,256,1) , (dimi0, z, x, y, channel)
output: ground truth, 5d ex: (1,64,256,256,2), (dimi0, z, x, y, channel)
min/max_angle: minimum and maximum angle to rotate in deg, positive integers/floats.
####
"""
# -> Only apply rotation in image plane -> faster and unnecessary to rotate xz or yz
def add_rotation2(input_im, output, max_angle):
    """Rotate image and GT by a random in-plane angle in [-max_angle, max_angle] degrees."""
    # randomly choose how much to rotate for specified max_angle
    angle_xy = round(uniform(-max_angle, max_angle))
    # rotate chunks (linear interpolation, constant zero padding, no reshape)
    input_im = rotate(input_im, angle_xy, axes=(0, 1), reshape=False, mode='constant', order=1)
    output[..., 1] = rotate(output[..., 1], angle_xy, axes=(0, 1), reshape=False, mode='constant', order=1)
    # threshold gt again, as we used linear interpolation after rotation
    output[..., 1][output[..., 1] <= 0.5] = 0
    output[..., 1][output[..., 1] > 0.5] = 1
    # background channel is kept as the complement of the foreground channel
    output[..., 0] = 1 - output[..., 1]
    output = output.astype(int).astype(np.float32)
    return input_im, output
"""
flips the array along random axis, no interpolation -> super-speedy :)
"""
def add_flip2(input_im, output):
    """Randomly mirror image and GT along a random spatial axis.

    With probability 1/2, both arrays are flipped along axis 0 or axis 1
    (chosen uniformly); otherwise they are returned unchanged. Fast: no
    interpolation involved.

    Fix: the original used `numpy.random.random_integers`, which was
    deprecated since NumPy 1.11 and removed in NumPy 1.25;
    `np.random.randint` draws the same distribution.
    """
    # randomly choose whether or not to flip
    if np.random.randint(0, 2) == 1:
        # randomly choose which axis to flip against (0 or 1)
        flip_ax = np.random.randint(0, 2)
        # flip image chunk and corresponding GT together so they stay aligned
        input_im = np.flip(input_im, flip_ax)
        output = np.flip(output, flip_ax)
    return input_im, output
"""
performs intensity transform on the chunk, using gamma transform with random gamma-value
"""
def add_gamma2(input_im, output, r_limits):
    """Random gamma transform on the V channel of the image (in HSV space).

    r_limits: (min, max) interval the gamma exponent is drawn from.
    The ground truth `output` is passed through untouched.
    """
    low, high = r_limits
    gamma = uniform(low, high)
    # float [0, 1] RGB -> uint8 so the OpenCV colour conversion applies.
    rgb8 = (np.round(255. * input_im.copy())).astype(np.uint8)
    hsv = cv2.cvtColor(rgb8, cv2.COLOR_RGB2HSV).astype(np.float32)
    # Gamma on the value channel only, clamped back into uint8 range.
    hsv[..., 2] = np.clip(np.round(hsv[..., 2] ** gamma), a_min=0, a_max=255)
    rgb8 = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
    # Normalize back to float [0, 1] after augmentation.
    input_im = rgb8.astype(np.float32) / 255
    return input_im, output
def add_scaling2(input_im, output, r_limits):
    """Randomly zoom the image/heatmap and GT by a factor drawn from r_limits.

    The zoomed arrays are cropped or zero-padded back to their original
    spatial shape. RGB channels use linear interpolation (order=1);
    heatmap and GT use nearest neighbour (order=0).
    NOTE(review): assumes input_im has exactly 4 channels (RGB + heatmap)
    -- confirm against the callers.
    """
    min_scaling, max_scaling = r_limits
    scaling_factor = np.random.uniform(min_scaling, max_scaling)

    def crop_or_fill(image, shape):
        # Crop or zero-pad a (H, W, C) array to the target spatial `shape`.
        # Equal sizes fall into the Fill branch, which is a harmless copy.
        image = np.copy(image)
        for dimension in range(2):
            if image.shape[dimension] > shape[dimension]:
                # Crop
                if dimension == 0:
                    image = image[:shape[0], :]
                elif dimension == 1:
                    image = image[:, :shape[1], :]
            else:
                # Fill
                if dimension == 0:
                    new_image = np.zeros((shape[0], image.shape[1], shape[2]))
                    new_image[:image.shape[0], :, :] = image
                elif dimension == 1:
                    new_image = np.zeros((shape[0], shape[1], shape[2]))
                    new_image[:, :image.shape[1], :] = image
                image = new_image
        return image

    input_im[..., :3] = crop_or_fill(scipy.ndimage.zoom(input_im[..., :3], [scaling_factor, scaling_factor, 1], order=1), input_im[..., :3].shape)  # RGB only
    input_im[..., 3] = np.squeeze(crop_or_fill(scipy.ndimage.zoom(np.expand_dims(input_im[..., 3], axis=-1), [scaling_factor, scaling_factor, 1], order=0), input_im.shape[:2] + (1,)), axis=-1)  # heatmap only
    output = crop_or_fill(scipy.ndimage.zoom(output, [scaling_factor, scaling_factor, 1], order=0), output.shape)
    return input_im, output
def add_brightness_mult2(input_im, output, r_limits):
    """Random brightness change: multiply the HSV value channel by a factor.

    r_limits: (min, max) interval the multiplier is drawn from.
    The ground truth `output` is passed through untouched.
    """
    low, high = r_limits
    factor = uniform(low, high)
    # float [0, 1] RGB -> uint8 so the OpenCV colour conversion applies.
    rgb8 = (np.round(255. * input_im.copy())).astype(np.uint8)
    hsv = cv2.cvtColor(rgb8, cv2.COLOR_RGB2HSV).astype(np.float32)
    # Scale the value channel only, clamped back into uint8 range.
    hsv[..., 2] = np.clip(np.round(hsv[..., 2] * factor), a_min=0, a_max=255)
    rgb8 = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
    # Normalize back to float [0, 1] after augmentation.
    input_im = rgb8.astype(np.float32) / 255
    return input_im, output
def add_HEstain2(input_im, output):
    """Random H&E stain augmentation of an RGB image via staintools.

    The StainAugmentor is constructed once and cached as a module-level
    global; it is re-fitted on every image. GT `output` is untouched.
    NOTE(review): the global cache is not thread/process safe -- confirm
    this is only used from a single worker at a time.
    """
    # RGB: float [0,1] -> uint8 [0,255] to use staintools
    input_im = (np.round(255. * input_im.astype(np.float32))).astype(np.uint8)
    # input_im = input_im.astype(np.uint8)
    # standardize brightness (optional -> not really suitable for augmentation?
    # input_im = staintools.LuminosityStandardizer.standardize(input_im)
    # define augmentation algorithm -> should only do this the first time!
    if not 'augmentor' in globals():
        global augmentor
        # input_im = input_im[...,::-1]
        # augmentor = staintools.StainAugmentor(method='vahadane', sigma1=0.2, sigma2=0.2) # <- best, but slow
        augmentor = staintools.StainAugmentor(method='macenko', sigma1=0.1, sigma2=0.1)  # <- faster but worse
    # fit augmentor on current image
    augmentor.fit(input_im)
    # extract augmented image
    input_im = augmentor.pop()
    # back to float [0, 1]
    input_im = input_im.astype(np.float32) / 255.
    return input_im, output
'''
def add_HEstain2_all(input_im, output):
input_shape = input_im.shape
# for each image in stack -> transform to RGB uint8
for i in range(input_im.shape[0]):
input_im[i] *= 255
input_im = input_im.astype(np.uint8)
# define augmentation algorithm -> should only do this the first time!
if not 'augmentor' in globals():
global augmentor
augmentor = staintools.StainAugmentor(method='vahadane', sigma1=0.2, sigma2=0.2)
# apply augmentation on all slices
augmentor.fit(input_im)
# for each image extract augmented images
input_out = np.zeros(input_shape)
#for i in range()
'''
def add_rotation2_ll(input_im, output):
    """Lossless random rotation: 0, 90, 180 or 270 degrees (no interpolation).

    Fix: the original used `numpy.random.random_integers`, which was
    deprecated since NumPy 1.11 and removed in NumPy 1.25;
    `np.random.randint(0, 4)` draws the same uniform {0, 1, 2, 3}.
    """
    # randomly choose the number of quarter turns; 0 -> no rotation
    k = np.random.randint(0, 4)
    # rotate image and GT together so they stay aligned
    input_im = np.rot90(input_im, k)
    output = np.rot90(output, k)
    return input_im, output
def add_hsv2(input_im, output, max_shift):
    """Randomly shift hue (wrapped mod 180) and saturation (clipped) of the image.

    max_shift: maximum absolute shift applied independently to H and S.
    GT `output` is untouched.
    """
    # RGB: float [0,1] -> uint8 [0,255]
    input_im = (np.round(255. * input_im.copy())).astype(np.uint8)
    # RGB -> HSV
    input_im = cv2.cvtColor(input_im, cv2.COLOR_RGB2HSV)
    input_im = input_im.astype(np.float32)
    ## augmentation, only on Hue and Saturation channel
    # hue: OpenCV hue range is [0, 180), hence the modulo wrap
    input_im[..., 0] = np.mod(input_im[..., 0] + round(uniform(-max_shift, max_shift)), 180)
    # saturation: clipped into the uint8 range instead of wrapped
    input_im[..., 1] = np.clip(input_im[..., 1] + round(uniform(-max_shift, max_shift)), a_min=0, a_max=255)
    # input_im = (np.round(255*maxminscale(input_im.astype(np.float32)))).astype(np.uint8)
    # input_im = np.round(input_im).astype(np.uint8)
    input_im = input_im.astype(np.uint8)
    # HSV -> RGB
    input_im = cv2.cvtColor(input_im, cv2.COLOR_HSV2RGB)
    # need to normalize again after augmentation
    input_im = (input_im.astype(np.float32) / 255)
    return input_im, output
import numba as nb


# JIT-compiled short-circuiting "any": returns on the first truthy element
# instead of scanning the whole array like np.any does.
@nb.jit(nopython=True)
def sc_any(array):
    """Return True if any element of `array` is truthy (short-circuits)."""
    for x in array.flat:
        if x:
            return True
    return False
def maxminscale(tmp):
    """Linearly rescale an array to the range [0, 1].

    Fix: the original divided by np.amax(tmp - min), which is 0 for any
    constant (non-zero) array, producing NaNs and runtime warnings. The
    zero-range case now returns an all-zero array instead. Empty arrays
    are returned unchanged. Also drops the numba `sc_any` dependency.
    """
    if np.size(tmp) == 0:
        return tmp
    tmp = tmp - np.amin(tmp)
    peak = np.amax(tmp)
    # Guard against division by zero for constant input.
    if peak > 0:
        tmp = tmp / peak
    return tmp
"""
aug: dict with what augmentation as key and what degree of augmentation as value
-> 'rotate': 20 , in deg. slow
-> 'shift': 20, in pixels. slow
-> 'affine': 0.2 . should be between 0.05 and 0.3. slow
-> 'flip': 1, fast
"""
def batch_gen2(file_list, batch_size, aug={}, shuffle_list=True, epochs=1):
    """Infinite generator yielding (im, gt) batches from HDF5 `file_list`.

    Each file is expected to hold 'data', 'label' and 'heatmap' datasets
    with a leading singleton axis. The heatmap is concatenated onto the
    input as an extra channel, the configured augmentations in `aug` are
    applied, and full batches are yielded as (im, gt).
    NOTE(review): `aug={}` is a mutable default argument -- it is never
    mutated here, but it is a latent hazard.
    """
    while (True):
        batch = 0
        # Probe files for array shapes. NOTE(review): this loops over ALL
        # files and keeps only the last file's shapes -- confirm all files
        # share the same shape (only the first would be needed otherwise).
        for filename in file_list:
            file = h5py.File(filename, 'r')
            input_shape = file['data'].shape
            output_shape = file['label'].shape
            file.close()
        # for each epoch, clear batch
        # NOTE(review): `im` is sized from the 'data' channel count, but
        # input_im below gets the heatmap channel concatenated -- confirm
        # the stored 'data' already accounts for this extra channel.
        im = np.zeros((batch_size, input_shape[1], input_shape[2], input_shape[3]))
        gt = np.zeros((batch_size, output_shape[1], output_shape[2], output_shape[3]))
        for i in range(epochs):
            if shuffle_list:
                random.shuffle(file_list)
            for filename in file_list:
                file = h5py.File(filename, 'r')
                input_im = np.array(file['data'], dtype=np.float32)
                output = np.array(file['label'], dtype=np.float32)
                heatmap = np.array(file['heatmap'], dtype=np.float32)
                file.close()
                # drop the leading singleton axis
                input_im = np.squeeze(input_im, axis=0)
                output = np.squeeze(output, axis=0)
                heatmap = np.squeeze(heatmap, axis=0)
                # heatmap appended as an extra input channel
                input_im = np.concatenate([input_im, heatmap], axis=-1)
                # preprocessing
                # input_im = input_im[0] # <- slow!
                # input_im = np.squeeze(input_im, axis=0)
                # RGB uint8 (to use staintools)
                # input_im = (np.round(255 * maxminscale(input_im.copy()))).astype(np.uint8)
                # standardize brightness
                # input_im = staintools.LuminosityStandardizer.standardize(input_im.astype(np.uint8)).astype(np.float32)
                # maxminscale # <- something wrong with this for RGB images???
                # input_im[pat] = maxminscale(input_im[pat].copy())
                # cv2.imshow('image', cv2.cvtColor(input_im[0], cv2.COLOR_RGB2BGR))
                # cv2.waitKey(0)
                # cv2.destroyAllWindows()
                # input_im = np.expand_dims(input_im, axis=0)
                # del input_im, output
                # apply specified agumentation on both image stack and ground truth
                if 'rotate_ll' in aug:
                    input_im, output = add_rotation2_ll(input_im, output)
                if 'flip' in aug:
                    input_im, output = add_flip2(input_im, output)
                if 'stain' in aug:  # <- do this first
                    input_im, output = add_HEstain2(input_im, output)
                if 'hsv' in aug:  # <- do this first
                    input_im, output = add_hsv2(input_im, output, aug['hsv'])
                if 'gamma' in aug:
                    input_im, output = add_gamma2(input_im, output, aug['gamma'])
                if 'mult' in aug:
                    input_im, output = add_brightness_mult2(input_im, output, aug['mult'])
                if 'scale' in aug:
                    input_im, output = add_scaling2(input_im, output, aug['scale'])
                if 'rotate' in aug:  # -> do this last maybe?
                    input_im, output = add_rotation2(input_im, output, aug['rotate'])
                if 'affine' in aug:
                    input_im, output = add_affine_transform2(input_im, output, aug['affine'])
                if 'shift' in aug:
                    input_im, output = add_shift2(input_im, output, aug['shift'])
                # normalize at the end
                # im[batch] = im[batch] / 255.
                # print(input_im.shape)
                # print(output.shape)
                # fig, ax = plt.subplots(1, 3)
                ##ax[0].imshow(input_im)
                # ax[1].imshow(output[..., 0], cmap="gray")
                # ax[2].imshow(output[..., 1], cmap="gray")
                # plt.show()
                im[batch] = np.expand_dims(input_im, axis=0)
                gt[batch] = np.expand_dims(output, axis=0)
                del input_im, output
                batch += 1
                # yield only complete batches; the buffers are reused
                if batch == batch_size:
                    batch = 0
                    yield im, gt
def batch_length(file_list):
    """Return (and log) the number of files feeding the generator."""
    count = len(file_list)
    print('images in generator:', count)
    return count
# file_list, batch_size, aug={}, shuffle_list=True, epochs=1
# @threadsafe_generator
from tensorflow.python.keras.utils.data_utils import Sequence
import multiprocessing as mp
from timeit import time
class mpBatchGeneratorCustom(Sequence):
    """Keras Sequence serving (image, ground-truth) batches from HDF5 files.

    Optionally concatenates a heatmap as an extra input channel
    (heatmap_guiding), builds hierarchical downsampled GTs for deep
    supervision, duplicates the GT for the "doubleunet" architecture, and
    can produce batches in parallel via mpGenerator().
    """

    def __init__(self, file_list, batch_size=1, verbose=False, aug=None, input_shape=(), nb_classes=2, N=1, classes=None, max_q_size=20, max_proc=8, heatmap_guiding=False,
                 deep_supervision=False, hprob=False, arch=None):
        self.file_list = file_list
        self.batch_size = batch_size
        self.sample_list = []
        self.verbose = verbose
        self.transforms = []
        self.batch_transforms = {}
        self.aug = aug
        self.N = N
        self.classes = classes
        self.input_shape = input_shape
        self.nb_classes = nb_classes
        self.output_shape = (self.batch_size, self.nb_classes)
        self.patch_list = []
        self.max_q_size = max_q_size
        self.max_proc = max_proc
        self.heatmap_guiding = heatmap_guiding
        self.deep_supervision = deep_supervision
        self.hprob = hprob
        self.arch = arch
        # Probe the first file for the real array shapes; this overwrites
        # the input_shape/output_shape values assigned above.
        file = h5py.File(file_list[0], 'r')
        self.input_shape = file['data'].shape
        self.output_shape = file['label'].shape
        file.close()

    #'''
    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(self.N / self.batch_size))

    def __getitem__(self, batch_index):
        sample_indices_for_batch = self.file_list[batch_index * self.batch_size:(batch_index + 1) * self.batch_size]
        return self._generate_batch(sample_indices_for_batch)  # generator batch based on selected patch indices
    '''
    def __iter__(self):
        """Creates an infinite generator that iterate over the Sequence.
        Yields:
            Sequence items.
        """
        while True:
            for item in (self[i] for i in range(len(self))):
                yield item
    '''
    #'''

    def mpGenerator(self):
        """ Use multiprocessing to generate batches in parallel. """
        try:
            #print("starting queue")
            queue = mp.Queue(maxsize=self.max_q_size)

            # define producer (putting items into queue)
            def producer():
                try:
                    #print("trying")
                    seed_N = None
                    #self.generate_random_patch(seed_N)
                    inputs = []
                    for i in range(self.batch_size):
                        inputs.append(self.generate_random_patch(seed_N))
                    X, y = self._generate_batch(inputs)
                    queue.put((X, y))
                except:
                    # NOTE(review): bare except silently drops producer
                    # failures; only this message is printed.
                    print("Nothing here")

            processes = []

            def start_process():
                print("starting process")
                # Top up the worker pool to max_proc producers.
                for i in range(len(processes), self.max_proc):
                    thread = mp.Process(target=producer)
                    time.sleep(0.01)
                    thread.start()
                    processes.append(thread)

            # run as consumer (read items from queue, in current thread)
            while True:
                # Drop finished workers and respawn up to max_proc.
                processes = [p for p in processes if p.is_alive()]
                if len(processes) < self.max_proc:
                    start_process()
                yield queue.get()
        except:
            # print("Finishing")
            # NOTE(review): if the failure happens before `processes` is
            # bound, this cleanup itself raises NameError -- confirm.
            for th in processes:
                th.terminate()
            queue.close()
            raise

    def on_epoch_end(self):
        self.on_epoch_begin()  # generate patch_list before next epoch starts

    def _transform_batch(self, input_im, output):
        """Apply each enabled augmentation in `self.aug`, each with probability 1/2."""
        aug = self.aug
        # apply specified agumentation on both image stack and ground truth
        if 'rotate_ll' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_rotation2_ll(input_im, output)
        if 'flip' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_flip2(input_im, output)
        if 'stain' in aug:  # <- do this first
            if np.random.choice([0, 1]) == 1:
                input_im[..., :3], output = add_HEstain2(input_im[..., :3], output)  # assumed first three channels correspond to the RGB image
        if 'hsv' in aug:  # <- do this first
            if np.random.choice([0, 1]) == 1:
                input_im[..., :3], output = add_hsv2(input_im[..., :3], output, aug['hsv'])
        if 'gamma' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_gamma2(input_im, output, aug['gamma'])
        if 'mult' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im[..., :3], output = add_brightness_mult2(input_im[..., :3], output, aug['mult'])
        if 'scale' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_scaling2(input_im, output, aug['scale'])
        if 'rotate' in aug:  # -> do this last maybe?
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_rotation2(input_im, output, aug['rotate'])
        if 'affine' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_affine_transform2(input_im, output, aug['affine'])
        if 'shift' in aug:
            if np.random.choice([0, 1]) == 1:
                input_im, output = add_shift2(input_im, output, aug['shift'])
        return input_im, output

    # def __iter__(self):
    #    return self.__next__()

    def on_epoch_begin(self):
        # reshuffle the file list so batches differ between epochs
        np.random.shuffle(self.file_list)

    def _generate_batch(self, samples):
        """Load, augment and assemble one batch from the HDF5 paths in `samples`."""
        # for each epoch, clear batch
        #im = np.zeros((self.batch_size, self.input_shape[1], self.input_shape[2], self.input_shape[3] + int(self.heatmap_guiding)))
        #gt = np.zeros((self.batch_size, self.output_shape[1], self.output_shape[2], self.output_shape[3]))
        im = []
        gt = []
        for batch, filename in enumerate(samples):
            file = h5py.File(filename, 'r')
            input_im = np.array(file['data'], dtype=np.float32)
            output = np.array(file['label'], dtype=np.float32)
            heatmap = np.array(file['heatmap'], dtype=np.float32)
            file.close()
            if not self.hprob:
                heatmap = (heatmap >= 0.5).astype(np.float32)  # threshold to produce binary heatmap
            # drop the leading singleton axis
            input_im = np.squeeze(input_im, axis=0)
            output = np.squeeze(output, axis=0)
            heatmap = np.squeeze(heatmap, axis=0)
            if self.heatmap_guiding:
                input_im = np.concatenate([input_im, heatmap], axis=-1)
            # augment (image and GT only, no heatmap)
            #input_im[..., :3], output = self._transform_batch(input_im[..., :3], output) # @TODO: Need to fix this to handle heatmap stuff also given as input...
            input_im, output = self._transform_batch(input_im, output)
            # if deep supervision is enabled, need to generate downsampled versions of the original GT
            if self.deep_supervision:
                hierarchical_gt = []
                hierarchical_gt.append(output)
                for i in range(1, 7):  # @TODO: Should make this generic, iterable here should be an input to the generator
                    tmp = deepcopy(output)
                    limit = int(pow(2, i))
                    # stride-sample every 2^i-th pixel in both spatial dims
                    new_gt = tmp[0::limit, 0::limit]
                    #new_gt = np.expand_dims(new_gt, axis=0)
                    hierarchical_gt.append(new_gt)
                output = hierarchical_gt
            #else:
            #output = np.expand_dims(output, axis=0)
            # @TODO: Create downsampled versions of the GT for deep supervision
            # ... add some code her ...
            #im.append(np.expand_dims(input_im, axis=0))
            im.append(input_im)
            gt.append(output)
            #im[batch] = np.expand_dims(input_im, axis=0)
            #gt[batch] = np.expand_dims(output, axis=0)
        if self.deep_supervision:
            # transpose list-of-samples-of-levels into list-of-levels-of-samples
            new_batches_y = []
            for c in range(len(gt[0])):
                flipped = []
                for b in range(len(gt)):
                    flipped.append(gt[b][c])
                new_batches_y.append(np.asarray(flipped))
            gt = new_batches_y.copy()
            del new_batches_y, flipped
        else:
            gt = np.array(gt)
        im = np.array(im)
        #gt = np.array(gt)
        # for DoubleU-Net, we need to provide two identical outputs
        if self.arch == "doubleunet":
            gt = [gt, gt]
        #print(im.shape)
        #for g in gt:
        #    print(g.shape)
        #print(im.shape)
        #print(gt.shape)
        #print(im.shape)
        return im, gt  # [gt[i] for i in range(gt.shape[0])])
|
httpCameraMotion.py | # signey julho 2018
# MIT License
# para fazer
# - diferença com contraste
# - tamanho da área atingida
# - área atingida na vertical ou na horizontal
# - área atingida se moveu
# add file /etc/pip.conf em nov/2018 (2 lines)
# [global]
# extra-index-url=https://www.piwheels.org/simple
# +
# install packages:
# - libjpeg6-turbo
# - libwebp + ( ln -s /usr/lib/libwebp.so.7 /usr/lib/libwebp.so.6 )
# - jasper + ( ln -s /usr/lib/libjasper.so.4 /usr/lib/libjasper.so.1 )
# - ( ln -s /usr/lib/libImath-2_4.so.24 /usr/lib/libImath-2_2.so.23 )
# - ( ln -s libIlmImf-2_4.so.24 libIlmImf-2_2.so.23 )
# - ( ln -s libIex-2_4.so.24 libIex-2_2.so.23 )
# - ( ln -s libHalf.so libHalf.so.23 )
# - ( ln -s libIlmThread-2_4.so.24 libIlmThread-2_2.so.23 )
# - ( ln -s libQt5Gui.so.5 libQtGui.so.4 )
# - ( ln -s libQt5Core.so.5 libQtCore.so.4 )
import sys
import traceback
import signal
import time
import http.server
import socketserver
import ssl
import io
#from io import StringIO
#from http import cookies
import http.cookies
import mimetypes
import os.path
from imageio import imwrite,imread
# camera
#import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
#import cv2
#from PIL import Image
#import numpy
import threading
import glob
#from skimage.transform import resize
#from scipy.misc import imread,imsave
from scipy.linalg import norm
from scipy import sum, average
def analizaMovSP(img1,img2):
    """Motion score: mean per-pixel Manhattan distance between two frames.

    Based on https://gist.github.com/astanin/626356 .  NOTE: ``sum`` and
    ``average`` are the scipy aliases imported at module top (they reduce
    over *all* axes), not the builtins.  Returns a float; larger values
    mean more difference between ``img1`` and ``img2``.
    """
    global e_cron
    # https://gist.github.com/astanin/626356
    def del_gray(arr):
        # NOTE(review): dead code - never called anywhere in this function.
        "From a color image, remove gray tones (= shadows)."
        if len(arr.shape) == 3:
            print (arr)
            x = average(arr, -1)
            print(len(x))
            print(len(x[0]))
            r = sum(abs(arr - average(arr, -1)))
            if (r<20):
                arr = [0,0,0]
            return arr
        else:
            return arr
    def to_grayscale(arr):
        "If arr is a color image (3D array), convert it to grayscale (2D array)."
        #tm.ev('gr');
        #print 'gr'
        # ("runs once...")
        " roda uma vez... "
        if len(arr.shape) == 3:
            return average(arr, -1)  # average over the last axis (color channels)
        else:
            return arr
    def normalize(arr):
        # Stretch pixel values to the full 0..255 range, compensating for
        # exposure differences between the two frames.
        amin = arr.min()
        rng = arr.max()-amin
        return (arr-amin)*255/rng
    def compare_images(img1, img2):
        # normalize to compensate for exposure difference
        img1 = normalize(img1)
        img2 = normalize(img2)
        # calculate the difference and its norms
        diff = img1 - img2  # elementwise for scipy arrays
        m_norm = sum(abs(diff))  # Manhattan norm
        z_norm = norm(diff.ravel(), 0)  # Zero norm
        return (m_norm, z_norm)
    #
    e_cron.start('an');
    # read images as 2D arrays (convert to grayscale for simplicity)
    if False:  # disabled branch: compare in full colour
        img1 = img1.astype(float)
        img2 = img2.astype(float)
    else:
        #print len(F1.shape)
        img1 = to_grayscale(img1.astype(float))
        #print len(img1.shape)
        img2 = to_grayscale(img2.astype(float))
    # compare
    n_m, n_0 = compare_images(img1, img2)
    #print "Manhattan norm:", n_m, "/ per pixel:", n_m/img1.size
    #print "Zero norm:", n_0, "/ per pixel:", n_0*1.0/img1.size
    e_cron.stop('an');
    return n_m/img1.size
def getImageN():
    """Capture a frame via the camera's continuous-capture path.

    Lazily (re)initialises the stream when ``c_init`` is 0 and returns the
    BGR image array, or False on capture failure.  Sets the global
    ``capturando`` flag around the capture so ``sair()`` can wait for it.
    """
    global capturando,camera,exposAtual,rh
    # continuous-capture state
    global c_raw,c_init,monitRunning
    dr = time.strftime("%H:%M", time.localtime())  # NOTE(review): unused
    capturando = True
    e_cron.start('cp')
    ti = time.time();  # NOTE(review): unused
    try:
        # initialise (or re-initialise after an exposure change)?
        if c_init==0:
            #c_raw = PiRGBArray(camera)
            camera.start_preview()
            time.sleep(0.2)
            c_init = 1
            c_raw = PiRGBArray(camera)
            # NOTE(review): capture_continuous returns a generator that is
            # never iterated here - confirm c_raw actually refreshes.
            camera.capture_continuous(c_raw, format="bgr")
    except:
        c_init = 0
        lg.erro(sys.exc_info(),"ERRO no capture...")
        return False
    e_cron.stop('cp')
    capturando = False
    img = c_raw.array
    # night/day exposure adjustment? reset the stream when it changed
    if monitRunning and expos(img):
        c_init = 0
    legImage(img);
    return img
def legImage(img):
    """Draw a caption (timestamp, exposure, capture rate) onto *img* in place.

    Also refreshes the global ``e_imgPorSeg`` (images-per-second) statistic,
    which ``monitor()`` reads - so the stat is updated even when OpenCV is
    unavailable and no text can be drawn.
    """
    global exposAtual,e_cron,e_imgPorSeg
    # images per second = number of 'cp' captures / total elapsed time
    tc = e_cron.get('cp')
    e_imgPorSeg = '{:.2f}'.format(tc[0]/(e_cron.tempoTotal()))+"ips"
    # BUGFIX: cv2 is not imported at module level (its import is commented
    # out), so the original body raised NameError whenever it ran.  Import
    # lazily and degrade to "stats only" when OpenCV is missing.
    try:
        import cv2
    except ImportError:
        return
    font = cv2.FONT_HERSHEY_SIMPLEX
    tx = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) \
        + " " + str(exposAtual) \
        + " " + e_imgPorSeg \
        + " " + e_cron.perc()
    cv2.putText(img,tx,(10,rh-10), font, 0.5,(255,255,255),1) #,cv2.LINE_AA)
def getImage():
    """Capture a single frame (one-shot capture) and return it as a BGR array.

    Sets ``capturando`` while the camera is busy and triggers the night/day
    exposure adjustment via ``expos()``.
    """
    global capturando,camera,exposAtual,rh,e_cron,c_raw
    dr = time.strftime("%H:%M", time.localtime())  # NOTE(review): unused
    capturando = True
    e_cron.start('cp')
    #camera.start_preview()
    #rawCapture = PiRGBArray(camera)
    # allow the camera to warmup
    #time.sleep(0.1)
    # grab an image from the camera
    try:
        #print('r='+obj(c_raw)) # camera,size=None,array=(array:[],dtype:uint8)
        c_raw = PiRGBArray(camera)
        camera.capture(c_raw, format="bgr")
    except:
        # NOTE(review): on failure this falls through and returns the stale
        # c_raw.array from the previous capture - confirm that is intended.
        lg.erro(sys.exc_info(),"ERRO no capture... "+str(e_cron.get('cp')[0]))
    e_cron.stop('cp')
    capturando = False
    img = c_raw.array
    # night/day exposure adjustment?
    expos(img)
    # caption drawing disabled (needs cv2)
    # sem cv2 ... legImage(img)
    return img
def expos(img):
    """Night/day auto-exposure state machine.

    During the day the camera runs in auto-exposure mode (exposAtual == -1);
    when the scene gets dark the shutter speed is driven manually, ramping in
    ``exInc`` ms steps.  The decision uses the mean pixel value of *img*.
    Returns True when the exposure was changed (callers use this to reset
    the capture stream).
    """
    # ("if the image is too dark, enter manual night mode")
    " se imagem muito escura entra no modo manual noite "
    global camera,exposAtual,rw,rh,camSleep,camSleepD,camSleepN,mCor,e_cron
    def Set():
        # apply manual exposure; exposAtual is the shutter time in ms
        lg.print("camera.exposure_mode = 'off' _speed="+str(exposAtual)+"000 "+str(mCor))
        camera.exposure_mode = 'off'
        #int('vel',camera.shutter_speed)
        camera.framerate=1000/exposAtual
        camera.shutter_speed = exposAtual*1000  # microseconds
        # NOTE(review): this assigns a *local* camSleep (Set has no `global`
        # declaration), so the module-level camSleep is never switched to the
        # night value - likely a latent bug; confirm intent.
        camSleep = camSleepN
        #camera.iso = 800
    # mean pixel value over channels, columns and rows -> scene brightness
    e_cron.start('ex');
    if False:
        mc = average(img,-1) # channel mean
        mc1 = average(mc,-1) # column mean
        mCor = average(mc1,-1) # row mean
    else:
        mCor = average(average(average(img,-1),-1),-1);
    e_cron.stop('ex');
    exIni=100 # initial manual exposure (ms)
    exInc=100 # increment per step (ms)
    if mCor<50:
        if exposAtual == -1:
            # dusk begins: switch from auto to manual exposure
            exposAtual = exIni
            Set()
        else:
            # getting darker: lengthen the exposure
            exposAtual += exInc
            Set()
        return True
    if mCor>140:
        if exposAtual>exIni:
            # dawn: shorten the exposure
            exposAtual -= exInc;
            Set()
        else:
            # full daylight: back to auto exposure
            lg.print("camera.exposure_mode = 'auto' ="+str(mCor))
            exposAtual = -1
            cameraInit()
            camSleep = camSleepD
        return True
    return False
def cameraInit():
    """(Re)create the global PiCamera at the configured resolution.

    Closes any previous camera instance first and resets the global
    ``c_raw`` capture buffer.
    """
    global camera,rw,rh,c_raw
    lg.print('init camera');
    if camera!=None:
        camera.close()
    camera = PiCamera()
    camera.resolution = (rw, rh)
    camera.start_preview()
    time.sleep(2)  # give the sensor time to settle
    c_raw = PiRGBArray(camera) #,size=(rw,rh))
    lg.print('init camera FIM');
def monitor():
    """Worker thread: compare successive camera frames and save motion events.

    Keeps a rolling window of the last ``len(nvv)`` difference scores; when
    the current score exceeds 1.6x the window average, both the previous and
    the current frame are written to a per-day directory as JPEG evidence.
    Runs until the global ``monitRunning`` flag is cleared.
    """
    global imgA,monitRunning,camera,mCor,camSleep,e_imgPorSeg,rw,e_cron
    nv = 0                  # frames processed so far
    nvv = [2,2,2,2,2]       # rolling window of recent difference scores
    nd = 10000              # event counter (starts high for sortable names)
    imgA = getImage()
    lg.print("df md tempo mdByte")
    while monitRunning:
        try:
            t = time.time()
            img = getImage()
            df = analizaMovSP(imgA,img);
            # keep the last len(nvv) differences and their running average
            nvv[nv%len(nvv)] = df
            nv += 1
            md = sum(nvv)/len(nvv)
            # per-frame debug logging (disabled)
            if False:
                lg.print('{:.2f}'.format(df)
                    +" "+str(int(md*100)/100.0)
                    +" "+str(int((time.time()-t)*1000))
                    +" "+'{:.2f}'.format(mCor)
                    +" "+e_imgPorSeg
                    +" "+e_cron.perc()
                    +str(rw)
                ) #n_0*1.0/img1.size/cor
            if df>md*1.6:
                # motion detected: save the before/after frame pair
                nd += 1
                dr = time.strftime("%Y-%m-%d", time.localtime())
                if not os.path.exists(dr):
                    os.mkdir(dr)
                dt = dr+'/'+time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
                print(str(nd)+' '+dt);
                imwrite(dt+'_a_'+str(nd)+'.jpg',imgA)
                imwrite(dt+'_b_'+str(nd)+'.jpg',img)
            time.sleep(camSleep/1000)  # camSleep is in milliseconds
            imgA = img
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt raised inside this worker thread.
            lg.erro(sys.exc_info(),'ERRO monitor')
    lg.print('finalizando monitor')
def startMonitor():
    """Launch the frame-comparison loop on a daemon background thread."""
    worker = threading.Thread(target=monitor, daemon=True)
    worker.start()
class Handler(http.server.SimpleHTTPRequestHandler):
    """HTTPS request handler: camera snapshots, event listings, static files."""

    def sendImageBgr(self,img):
        """Send *img* to the client as image/jpeg (via a temp file).

        Writes its own headers, so callers must NOT call self.h() first.
        """
        # write to a temp jpg, then stream that file back
        aq='/tmp/im-aaa-tmp.jpg';
        imwrite(aq,img)
        self.h('image/jpeg')
        f = open(aq, "rb")
        bf = '?'
        while len(bf)!=0:
            bf = f.read()
            self.wfile.write(bf)
        f.close()

    def Dir(self,dr):
        """Render directory *dr* as an HTML listing, newest files first."""
        entries = [
            (fn, os.stat(dr+'/'+fn))
            for fn in os.listdir(dr)
        ]
        entries.sort(key=lambda e: e[1].st_mtime, reverse=True)
        self.h('text/html')
        self.on('<html><head><title>r0 dir</title></head>'
            +'<LINK REL="shortcut icon" href="/httpCameraMotion-icone.ico"/><LINK REL="icon" href="/httpCameraMotion-icone.ico"/>'
            +'<script src="../img.js"></script><body><pre>'
        )
        for name, st in entries:
            self.on(
                name
                +'\t'
                +time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(st.st_mtime))
                +'\t'
                +str(st.st_size)
            )
        # BUGFIX: closing tag was emitted as '<html>' instead of '</html>'
        self.on('</pre></body></html>')

    def analizaFotoLIXO(self,foto,modelo):
        """Dead experiment ("LIXO" = trash): haar-cascade detection.

        NOTE(review): cv2 is not imported at module level; calling this
        raises NameError.  Kept only for reference - never called.
        """
        found = False
        # normalize pixel values
        cv2.normalize(foto, foto, 0, 255, cv2.NORM_MINMAX) #, dtype=cv2.CV_32F)
        # convert to gray
        gray = cv2.cvtColor(foto, cv2.COLOR_BGR2GRAY)
        objects = modelo.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        if len(objects) > 0:
            found = True
            # annotate the photo (OpenCV colours are BGR, not RGB)
            cv2.circle(foto, (10,10), 20, (0,0,255))
            # draw a rectangle around each detected object
            for (x, y, w, h) in objects:
                cv2.rectangle(foto, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.circle(foto, (10,10), 20, (0,0,255))
        ret, jpeg = cv2.imencode('.jpg', foto)
        return (jpeg.tobytes(), found)

    def dirEventos(self):
        """List the saved event snapshots as inline images."""
        self.h('text/html')
        for i in glob.glob("./10*.jpg"):
            # BUGFIX: was `on(...)` (NameError) instead of `self.on(...)`
            self.on('<img src="'+i+'">')

    def foto(self):
        """Send the current camera frame; capture one if monitoring is off."""
        global imgA,monitRunning
        t = time.time()
        #https://www.pyimagesearch.com/2015/03/30/accessing-the-raspberry-pi-camera-with-opencv-and-python/
        if monitRunning:
            # the monitor thread keeps imgA fresh - just send it
            self.sendImageBgr(imgA)
        elif False and monitRunning:
            # disabled: MJPEG-style push stream of changed frames
            self.h('multipart/x-mixed-replace; boundary=frame')
            image = imgA
            while True:
                self.wfile.write(b'--frame\r\n')
                self.wfile.write(b'Content-Type: image/jpeg\r\n\r\n')
                self.sendImageBgr(image)
                self.wfile.write(b'\r\n\r\n')
                tf = time.time()
                # wait up to 7 seconds; if the image did not change, repeat
                while ((time.time()-tf<7) and (image.all() == imgA.all())):
                    time.sleep(0.2)
                if image.all() == imgA.all():
                    break
                else:
                    time.sleep(0.1)
                image = imgA
        else:
            # monitor stopped: capture a frame on demand.
            # BUGFIX: removed a redundant self.h('image/jpeg') here -
            # sendImageBgr() already sends the headers, so the response
            # used to carry two header blocks.
            image = getImage()
            self.sendImageBgr(image)

    def raiz(self):
        """Root page: embeds the /foto snapshot."""
        self.cookieHeader = self.headers.get('Cookie')
        lg.print('cookie:'+str(self.cookieHeader));
        #self.cookieSet('fig','newton')
        self.h('text/html')
        self.on("<html><head><title>r0</title></head>"
            +"<body style=\"width:100%;\">"
            +"<img style=\"image-orientationx: 180deg flip;width:95%;\" src=\"/foto\">"
            +"</body>"
            +"</html>"
        )
        # NOTE(review): `np` is never initialised in the main script; this
        # would raise NameError if raiz() were reachable (see do_GET 'xx/').
        global np
        np += 1

    def mandaArq(self,Aq,pr):
        """Serve a static file under www_root, with an extension blacklist
        and a '..' path-traversal guard."""
        aq = mStr((www_root+Aq).replace('//','/'))
        # directory? serve its index.html, or a generated listing
        if os.path.isdir(aq):
            ap = aq+'/index.html'
            if not os.path.isfile(ap):
                self.Dir(aq)
                return
            aq = mStr(ap)
        elif not os.path.isfile(aq):
            self.h('text/html',404);
            self.on('not found "'+Aq+'"')
            return
        ex = aq.substrRat('.')
        try:
            mim = mimetypes.types_map['.'+ex]
        except KeyError:
            # BUGFIX(idiom): narrowed from a bare `except:`
            mim = 'application/x-binary'
        # BUGFIX: the needle used to be '-'+ex+'.' which can never match the
        # "-py-pyc-php-" blacklist, so the source-file filter was inert.
        if (
            "-py-pyc-php-".find('-'+ex+'-') != -1
            or
            aq.find('..')!=-1
        ):
            lg.print("segurança, 'tipo arq' proibido ou '..' .."+aq)
            return
        # stream the file
        self.h(mim)
        f = open(aq, "rb")
        bf = '?'
        while len(bf)!=0:
            bf = f.read()
            self.wfile.write(bf)
        f.close()

    def do_GET(self):
        """Route GET requests to the handlers above."""
        dr = mStr(self.path)
        if dr.find('?')!=-1:
            pr = mStr(dr).substrAt('?')
        else:
            pr = ''
        dr = dr.leftAt('?')
        lg.print("dr="+dr+" param="+pr)
        # reset the per-request cookie buffer
        self.cookie = ''
        try:
            if dr=='/favicon.ico':
                return;
            if dr=='xx/':
                # NOTE(review): 'xx/' looks like a deliberately disabled
                # root route ('/'); confirm before re-enabling raiz().
                self.raiz()
            elif dr=='/setExposicao':
                global exposHttp
                exposHttp = float(pr);
            elif dr=='/foto':
                self.foto()
            elif dr=='/stop':
                self.h('text/html',200);
                self.on('stop')
                lg.print("sair solicitado pela web")
                sair()
            elif dr=='/dir':
                self.dirEventos()
            else:
                self.mandaArq(dr,pr)
        except Exception:
            lg.erro(sys.exc_info(),'ERRO handler HTTTP')
        return

    def cookieSet(self,n,v):
        """Queue a Set-Cookie header for the next response."""
        C = http.cookies.SimpleCookie()
        C[n] = v
        self.cookie += C.output(header='')+'; '

    def h(self,mime,cod=200):
        """Send the status line and headers for a *mime* response."""
        self.send_response(cod)
        if mime.startswith('text/') and mime.find(';')==-1:
            mime += '; charset='+charset
        self.send_header('Content-type',mime)
        # attach any queued cookies
        if self.cookie != '':
            self.send_header('Set-Cookie', self.cookie)
            self.cookie = ''
        # Content-Security-Policy (disabled)
        if False:
            csp = "default-src 'self'; img-src https://*; child-src 'none'";
            csp = "default-src *; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-inline' 'unsafe-eval' http://www.google.com";
            self.send_header('Content-Security-Policy',csp)
        self.end_headers()

    def on(self,s):
        """Write one line of text to the response body."""
        self.wfile.write(bytes(s+'\n', charset))
# biblioteca
class mStr(str):
    """str subclass with small substring-extraction helpers."""

    def leftAt(self,sf):
        """Everything before the first *sf* (the whole string if absent)."""
        idx = self.find(sf)
        return self if idx == -1 else self[:idx]

    def substrRat(self,sf):
        """Everything after the last *sf* ('' if absent)."""
        idx = self.rfind(sf)
        return '' if idx == -1 else self[idx+len(sf):]

    def substrAt(self,sf):
        """Everything after the first *sf* ('' if absent)."""
        idx = self.find(sf)
        return '' if idx == -1 else self[idx+len(sf):]
def obj(o):
    """Debug helper: render an object's attribute list as a string."""
    return str(dir(o))
class cronometro(object):
    """A set of restartable named stopwatches for lightweight profiling.

    Each named timer stores [call_count, accumulated_seconds, start_stamp];
    start_stamp is -1 while the timer is stopped.
    """

    def perc(self):
        """Percentage of the total elapsed time spent in each timer."""
        out = ''
        for name, data in self.v.items():
            out += name+"%"+'{:.1f}'.format(data[1]/self.tempoTotal()*100)+' '
        return out

    def tempoTotal(self):
        """Seconds elapsed since this cronometro was created."""
        return time.time()-self.tt

    def start(self,s):
        """Start (or restart) timer *s*, creating it on first use."""
        rec = self.v.setdefault(s, [0, 0, -1])
        rec[2] = time.time()

    def stop(self,s):
        """Stop timer *s*: bump its count and accumulate the elapsed time."""
        rec = self.v[s]
        rec[0] += 1
        rec[1] += time.time()-rec[2]
        rec[2] = -1

    def get(self,s):
        """Return the raw [count, total_seconds, start_stamp] record for *s*."""
        return self.v[s]

    def txt(self):
        """Multi-line report: timer name, call count, total ms, mean ms."""
        report = '\n==> '+self.tit+'\tnv\tms\tmsMD'
        for name in self.v:
            cnt, tot = self.v[name][0], self.v[name][1]
            report += ("\n"+name
                +"\t"+str(cnt)
                +"\t"+str(int(tot*1000))
                +"\t"+str(int(tot*1000/cnt))
            )
        return report

    def __init__(self,a):
        self.tit = a          # report title
        self.tt = time.time() # creation time (basis for tempoTotal)
        self.v = {}           # name -> [count, total_seconds, start_stamp]
class log(object):
    """Append-only, tab-separated log file with a daily-rotation helper."""

    def __init__(self,path):
        self.path = path
        self.file = open(self.path,'a')

    def rotateDay(self,tm=-1):
        """Rotate when the file's mtime date differs from today.

        With tm >= 0, rotate only if the file is also larger than tm bytes.
        """
        st = os.stat(self.path)
        mtime_day = time.strftime("%Y-%m-%d", time.localtime(st.st_mtime))
        today = time.strftime("%Y-%m-%d", time.localtime())
        if mtime_day != today and (tm == -1 or st.st_size > tm):
            self.file.close()
            os.rename(self.path, self.path+'-'+mtime_day)
            self.file = open(self.path,'a')

    def print(self,ln):
        """Echo *ln* to stdout and append a sanitised copy to the file."""
        stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print('lg\t'+stamp+'\t'+ln)
        # keep each record on a single line in the file
        ln = ln.replace('\r','').replace('\n','\\n').replace('\t','\\t')
        self.file.write(stamp+'\t'+ln+'\n')
        self.file.flush()

    def close(self):
        self.file.close()

    def erro(self,err,tx='Unexpected ERROR'):
        """Log an exception triple (as returned by sys.exc_info())."""
        trace = ''.join(traceback.format_tb(err[2]))
        self.print('==============>> '+tx)
        self.print('\tTYPE:'+str(err[0]))
        self.print('\tVALUE:'+str(err[1]))
        self.print('\tTRACE: '+trace)
class GracefulKiller:
    """Flips kill_now to True when SIGINT or SIGTERM arrives."""
    kill_now = False

    def __init__(self):
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

    def exit_gracefully(self,signum, frame):
        # handler signature (signum, frame) mandated by signal.signal
        self.kill_now = True
def sairKill():
    # Shutdown entry point used when the process is terminated externally.
    lg.print("saindo via KILL")
    sair()
def sair():
    """Orderly shutdown: stop the monitor, close camera and HTTP server.

    Waits for any in-flight capture (``capturando``) to finish before
    closing the server, then asks the main loop to exit via killer.kill_now.
    """
    global camera,monitRunning,killer
    monitRunning = False
    camera.close()
    if capturando:
        lg.print("aguardando fim captura...")
        while capturando:
            print(".")
            monitRunning = False  # NOTE(review): redundant; already cleared above
            time.sleep(0.2)
        lg.print('fim captura')
    httpd.server_close()
    lg.print('sair(): saida normal...')
    killer.kill_now = True
def startHttp():
    """HTTP serving loop; runs on a daemon thread until monitRunning clears."""
    global httpd
    # handle_request() (vs serve_forever) lets the loop re-check monitRunning
    while monitRunning:
        httpd.handle_request()
    #httpd.serve_forever()
#==================================================================
# main - program entry point
#==================================================================
try:
    # per-day log directory and file
    dn = time.strftime("%Y-%m-%d", time.localtime())
    if not os.path.exists(dn):
        os.mkdir(dn)
    lg = log(dn+'/prg.log')
    #lg.rotateDay()
    lg.print('inicio log...')
    # camera resolution presets
    #camera = PiCamera(resolution=(1280, 720), framerate=30)
    #rh=240;rw=320
    #rh=480;rw=640
    rh=768;rw=1024
    #rh=1920;rw=2560
    # continuous-capture state
    c_raw=None
    c_init=0
    camera=None
    cameraInit()
    # mean scene brightness (updated by expos())
    mCor = 0.0
    # inter-frame sleep in ms: daytime
    camSleepD=20
    # nighttime
    camSleepN=20
    # runtime state
    camSleep=camSleepD
    exposAtual = -1   # -1 = auto exposure (daytime)
    # statistics
    e_imgPorSeg = 0
    e_cron = cronometro('desempenho codigo');
    # guards against aborting while a capture is in flight
    capturando = False
    # load mimetypes for static file serving
    mimetypes.init()
    #print("arqs mime: "+str(mimetypes.knownfiles))
    monitRunning = True
    imgA = False
    startMonitor()
    #time.sleep(60);
    # HTTPS server configuration
    www_root = '.'
    charset = 'utf-8'
    port = 8043
    lg.print('Server listening on port '+str(port)+'...')
    httpd = socketserver.TCPServer(('', port), Handler)
    # generate a key with:
    # openssl req -new -x509 -days 4000 -nodes -out chave.crt -keyout chave.key
    # NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12) -
    # confirm the target interpreter, else migrate to SSLContext.wrap_socket.
    httpd.socket = ssl.wrap_socket (httpd.socket, certfile='../chave.crt', keyfile='../chave.key', server_side=True)
    #httpd.serve_forever()
    t = threading.Thread(target=startHttp, args=())
    t.daemon = True
    t.start()
    # idle until a termination signal is received
    killer = GracefulKiller()
    while True:
        time.sleep(3) # seconds
        if killer.kill_now:
            break
    # close everything down
    sairKill()
except KeyboardInterrupt:
    lg.print("saida solicitada pelo teclado!")
    sair()
except:
    print (sys.exc_info(),'ERRO prg principal')
    lg.erro(sys.exc_info(),'ERRO prg principal')
finally:
    camera.close()
    lg.print("finally FIM...")
    lg.close()
|
vaults.py | import logging
import re
import threading
import time
from typing import List
from brownie import chain
from eth_utils import encode_hex, event_abi_to_log_topic
from joblib import Parallel, delayed
from semantic_version.base import Version
from yearn import apy
from yearn.apy.common import ApySamples
from yearn.common import Tvl
from yearn.events import create_filter, decode_logs
from yearn.multicall2 import fetch_multicall
from yearn.prices import magic
from yearn.prices.curve import curve
from yearn.utils import safe_views, contract
from yearn.v2.strategies import Strategy
from yearn.exceptions import PriceError
from yearn.decorators import sentry_catch_all, wait_or_exit_after
from yearn.networks import Network
# Vault view methods whose raw integer results must be divided by the
# vault's 10**decimals scale before being reported (see Vault.describe).
VAULT_VIEWS_SCALED = [
    "totalAssets",
    "maxAvailableShares",
    "pricePerShare",
    "debtOutstanding",
    "creditAvailable",
    "expectedReturn",
    "totalSupply",
    "availableDepositLimit",
    "depositLimit",
    "totalDebt",
    "debtLimit",
    "lockedProfit",
    "lockedProfitDegration",
]
# we are only interested in strategy-related events
STRATEGY_EVENTS = [
    "StrategyAdded",
    "StrategyMigrated",
    "StrategyRevoked",
    "StrategyReported",
]
logger = logging.getLogger(__name__)
class Vault:
    """A yearn v2 vault: tracks its strategies via on-chain events.

    Strategy discovery runs on a daemon thread (``watch_events``); the
    ``strategies``/``revoked_strategies`` properties block until the first
    full event scan has completed.
    """

    def __init__(self, vault, api_version=None, token=None, registry=None, watch_events_forever=True):
        self._strategies = {}   # address -> active Strategy
        self._revoked = {}      # address -> revoked or migrated-away Strategy
        self._reports = []      # StrategyReported events, in arrival order
        self.vault = vault
        self.api_version = api_version
        if token is None:
            token = vault.token()
        self.token = contract(token)
        self.registry = registry
        self.scale = 10 ** self.vault.decimals()
        # multicall-safe views with 0 inputs and numeric output.
        self._views = safe_views(self.vault.abi)
        # load strategies from events and watch for freshly attached strategies
        self._topics = [
            [
                encode_hex(event_abi_to_log_topic(event))
                for event in self.vault.abi
                if event["type"] == "event" and event["name"] in STRATEGY_EVENTS
            ]
        ]
        self._watch_events_forever = watch_events_forever
        self._done = threading.Event()
        self._has_exception = False
        self._thread = threading.Thread(target=self.watch_events, daemon=True)

    def __repr__(self):
        strategies = "..."  # don't block if we don't have the strategies loaded
        if self._done.is_set():
            strategies = ", ".join(f"{strategy}" for strategy in self.strategies)
        return f'<Vault {self.vault} name="{self.name}" token={self.token} strategies=[{strategies}]>'

    def __eq__(self, other):
        # comparable with other Vaults (by contract) and with address strings
        if isinstance(other, Vault):
            return self.vault == other.vault
        if isinstance(other, str):
            return self.vault == other
        raise ValueError("Vault is only comparable with [Vault, str]")

    @classmethod
    def from_address(cls, address):
        """Alternate constructor from a bare contract address."""
        vault = contract(address)
        instance = cls(vault=vault, token=vault.token(), api_version=vault.apiVersion())
        instance.name = vault.name()
        return instance

    @property
    def strategies(self) -> List[Strategy]:
        """Active strategies (blocks until the first event scan completes)."""
        self.load_strategies()
        return list(self._strategies.values())

    @property
    def revoked_strategies(self) -> List[Strategy]:
        """Revoked/migrated strategies (blocks until the first event scan)."""
        self.load_strategies()
        return list(self._revoked.values())

    @property
    def is_endorsed(self):
        """True if listed among the registry's vaults; None without a registry."""
        if not self.registry:
            return None
        return str(self.vault) in self.registry.vaults

    @property
    def is_experiment(self):
        """True for experimental vaults; None when no registry is attached."""
        if not self.registry:
            return None
        # experimental vaults are either listed in the registry or have the 0x address suffix in the name
        return str(self.vault) in self.registry.experiments or re.search(r"0x.*$", self.name) is not None

    @wait_or_exit_after
    def load_strategies(self):
        # Start the watcher thread exactly once.
        # NOTE(review): relies on Thread's private `_started` event; confirm
        # against the targeted Python version before changing.
        if not self._thread._started.is_set():
            self._thread.start()

    def load_harvests(self):
        """Load harvest history for every strategy, 8 threads at a time."""
        Parallel(8, "threading")(delayed(strategy.load_harvests)() for strategy in self.strategies)

    @sentry_catch_all
    def watch_events(self):
        """Daemon loop: replay historic strategy events, then follow new ones."""
        start = time.time()
        self.log_filter = create_filter(str(self.vault), topics=self._topics)
        logs = self.log_filter.get_all_entries()
        while True:
            events = decode_logs(logs)
            self.process_events(events)
            if not self._done.is_set():
                self._done.set()
                logger.info("loaded %d strategies %s in %.3fs", len(self._strategies), self.name, time.time() - start)
            if not self._watch_events_forever:
                return
            time.sleep(300)
            # read new logs at end of loop
            logs = self.log_filter.get_new_entries()

    def process_events(self, events):
        """Fold decoded vault events into the strategy maps."""
        for event in events:
            if event.name == "StrategyAdded":
                strategy_address = event["strategy"]
                logger.debug("%s strategy added %s", self.name, strategy_address)
                try:
                    self._strategies[strategy_address] = Strategy(strategy_address, self, self._watch_events_forever)
                except ValueError:
                    # CONSISTENCY: report through the module logger instead
                    # of a bare print(), like every other message here.
                    logger.error("error loading strategy %s", strategy_address)
            elif event.name == "StrategyRevoked":
                logger.debug("%s strategy revoked %s", self.name, event["strategy"])
                self._revoked[event["strategy"]] = self._strategies.pop(
                    event["strategy"], Strategy(event["strategy"], self, self._watch_events_forever)
                )
            elif event.name == "StrategyMigrated":
                logger.debug("%s strategy migrated %s -> %s", self.name, event["oldVersion"], event["newVersion"])
                self._revoked[event["oldVersion"]] = self._strategies.pop(
                    event["oldVersion"], Strategy(event["oldVersion"], self, self._watch_events_forever)
                )
                self._strategies[event["newVersion"]] = Strategy(event["newVersion"], self, self._watch_events_forever)
            elif event.name == "StrategyReported":
                self._reports.append(event)

    def describe(self, block=None):
        """Snapshot all multicall-safe views plus strategy details at *block*."""
        try:
            results = fetch_multicall(*[[self.vault, view] for view in self._views], block=block)
            info = dict(zip(self._views, results))
            for name in info:
                if name in VAULT_VIEWS_SCALED:
                    info[name] /= self.scale
            info["strategies"] = {}
        except ValueError as e:
            # BUGFIX: the multicall failure used to be swallowed silently
            # (the bound exception was unused); at least record it.
            logger.error("multicall failed for %s at block %s: %s", self.name, block, e)
            info = {"strategies": {}}
        for strategy in self.strategies:
            info["strategies"][strategy.unique_name] = strategy.describe(block=block)
        info["token price"] = magic.get_price(self.token, block=block)
        if "totalAssets" in info:
            info["tvl"] = info["token price"] * info["totalAssets"]
        info["experimental"] = self.is_experiment
        info["address"] = self.vault
        info["version"] = "v2"
        return info

    def apy(self, samples: ApySamples):
        """Pick the APY formula appropriate for this vault and API version."""
        if self._needs_curve_simple():
            return apy.curve.simple(self, samples)
        elif Version(self.api_version) >= Version("0.3.2"):
            return apy.v2.average(self, samples)
        else:
            return apy.v2.simple(self, samples)

    def tvl(self, block=None):
        """Total value locked at *block*: Tvl(total assets, unit price, tvl)."""
        total_assets = self.vault.totalAssets(block_identifier=block)
        try:
            # BUGFIX: the price was fetched with block=None (latest) while
            # totalAssets and decimals were read at *block*, producing an
            # inconsistent snapshot; use the same block everywhere.
            price = magic.get_price(self.token, block=block)
        except PriceError:
            price = None
        tvl = total_assets * price / 10 ** self.vault.decimals(block_identifier=block) if price else None
        return Tvl(total_assets, price, tvl)

    def _needs_curve_simple(self):
        # not able to calculate gauge weighting on chains other than mainnet
        curve_simple_excludes = {
            Network.Fantom: [
                "0xCbCaF8cB8cbeAFA927ECEE0c5C56560F83E9B7D9"
            ],
            Network.Arbitrum: [
                "0x239e14A19DFF93a17339DCC444f74406C17f8E67"
            ]
        }
        needs_simple = True
        if chain.id in curve_simple_excludes:
            needs_simple = self.vault.address not in curve_simple_excludes[chain.id]
        return needs_simple and curve and curve.get_pool(self.token.address)
|
test_flight.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import contextlib
import os
import socket
import struct
import tempfile
import threading
import time
import traceback
import pytest
import pyarrow as pa
from pyarrow.compat import tobytes
from pyarrow.util import pathlib
try:
from pyarrow import flight
from pyarrow.flight import (
FlightServerBase, ServerAuthHandler, ClientAuthHandler
)
except ImportError:
flight = None
FlightServerBase = object
ServerAuthHandler, ClientAuthHandler = object, object
# Marks all of the tests in this module as Flight tests.
# Deselect them with: pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
    """Import pyarrow.flight eagerly so any ImportError surfaces here."""
    # So we see the ImportError somewhere
    import pyarrow.flight  # noqa
def resource_root():
    """Get the path to the test resources directory."""
    root = os.environ.get("ARROW_TEST_DATA")
    if not root:
        raise RuntimeError("Test resources not found; set "
                           "ARROW_TEST_DATA to <repo root>/testing")
    return pathlib.Path(root) / "flight"
def read_flight_resource(path):
    """Get the contents of a test resource file."""
    root = resource_root()
    if not root:
        return None
    target = root / path
    try:
        return target.read_bytes()
    except FileNotFoundError:
        raise RuntimeError(
            "Test resource {} not found; did you initialize the "
            "test resource submodule?\n{}".format(target,
                                                  traceback.format_exc()))
def example_tls_certs():
    """Get the paths to test TLS certificates."""
    return {
        "root_cert": read_flight_resource("root-ca.pem"),
        "certificates": [
            flight.CertKeyPair(
                cert=read_flight_resource("cert{}.pem".format(i)),
                key=read_flight_resource("cert{}.key".format(i)),
            )
            for i in range(2)
        ],
    }
def simple_ints_table():
    """A one-column table of ints, used as canned response data."""
    ints = pa.array([-10, -5, 0, 5, 10])
    return pa.Table.from_arrays([ints], names=['some_ints'])
def simple_dicts_table():
    """A one-column table of dictionary-encoded strings, in two chunks."""
    dictionary = pa.array(["foo", "baz", "quux"], type=pa.utf8())
    chunks = pa.chunked_array([
        pa.DictionaryArray.from_arrays([1, 0, None], dictionary),
        pa.DictionaryArray.from_arrays([2, 1], dictionary),
    ])
    return pa.Table.from_arrays([chunks], names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
    """A Flight server that always returns the same data.

    See ARROW-4796: this server implementation will segfault if Flight
    does not properly hold a reference to the Table object.
    """

    def __init__(self):
        super(ConstantFlightServer, self).__init__()
        # Maps ticket bytes to a factory producing the response Table.
        self.table_factories = {
            b'ints': simple_ints_table,
            b'dicts': simple_dicts_table,
        }

    def do_get(self, context, ticket):
        # Build a fresh table so Flight holds the only reference to it.
        factory = self.table_factories[ticket.ticket]
        return flight.RecordBatchStream(factory())
class MetadataFlightServer(FlightServerBase):
    """A Flight server that numbers incoming/outgoing data."""
    def do_get(self, context, ticket):
        # Stream a fixed 5-row table one batch at a time, attaching the
        # batch index as app metadata (see number_batches below).
        data = [
            pa.array([-10, -5, 0, 5, 10])
        ]
        table = pa.Table.from_arrays(data, names=['a'])
        return flight.GeneratorStream(
            table.schema,
            self.number_batches(table))
    def do_put(self, context, descriptor, reader, writer):
        # Validate each uploaded batch and echo its index back as metadata.
        counter = 0
        expected_data = [-10, -5, 0, 5, 10]
        while True:
            try:
                batch, buf = reader.read_chunk()
                assert batch.equals(pa.RecordBatch.from_arrays(
                    [pa.array([expected_data[counter]])],
                    ['a']
                ))
                assert buf is not None
                # metadata carries the client's counter as little-endian int32
                client_counter, = struct.unpack('<i', buf.to_pybytes())
                assert counter == client_counter
                writer.write(struct.pack('<i', counter))
                counter += 1
            except StopIteration:
                # reader exhausted: upload complete
                return
    @staticmethod
    def number_batches(table):
        # Yield (batch, metadata) pairs; metadata is the batch index packed
        # as a little-endian int32.
        for idx, batch in enumerate(table.to_batches()):
            buf = struct.pack('<i', idx)
            yield batch, buf
class EchoFlightServer(FlightServerBase):
    """A Flight server that returns the last data uploaded."""
    def __init__(self, expected_schema=None):
        super(EchoFlightServer, self).__init__()
        # the most recently uploaded Table (None until the first do_put)
        self.last_message = None
        # optional schema every upload must match
        self.expected_schema = expected_schema
    def do_get(self, context, ticket):
        # Replay whatever the last do_put stored.
        return flight.RecordBatchStream(self.last_message)
    def do_put(self, context, descriptor, reader, writer):
        if self.expected_schema:
            assert self.expected_schema == reader.schema
        self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
    """An echo server that streams individual record batches."""

    def do_get(self, context, ticket):
        # Stream the stored table back in <=1024-row chunks rather than
        # as one contiguous stream.
        batches = self.last_message.to_batches(max_chunksize=1024)
        return flight.GeneratorStream(self.last_message.schema, batches)

    def list_actions(self, context):
        return []

    def do_action(self, context, action):
        if action.type != "who-am-i":
            raise NotImplementedError
        # report the authenticated peer identity back to the caller
        return iter([flight.Result(context.peer_identity())])
class GetInfoFlightServer(FlightServerBase):
    """A Flight server that tests GetFlightInfo."""
    def get_flight_info(self, context, descriptor):
        # Positional args: schema, descriptor, endpoints, then
        # total_records=-1 and total_bytes=-1 (both unknown).
        return flight.FlightInfo(
            pa.schema([('a', pa.int32())]),
            descriptor,
            [
                # one endpoint given as a URI string, one as a Location
                flight.FlightEndpoint(b'', ['grpc://test']),
                flight.FlightEndpoint(
                    b'',
                    [flight.Location.for_grpc_tcp('localhost', 5005)],
                ),
            ],
            -1,
            -1,
        )
    def get_schema(self, context, descriptor):
        # Derive the schema result from the same FlightInfo.
        info = self.get_flight_info(context, descriptor)
        return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
    """A Flight server that tests ListActions."""

    @classmethod
    def expected_actions(cls):
        # Mix of plain tuples and ActionType to exercise both accepted forms.
        return [
            ("action-1", "description"),
            ("action-2", ""),
            flight.ActionType("action-3", "more detail"),
        ]

    def list_actions(self, context):
        yield from self.expected_actions()
class ListActionsErrorFlightServer(FlightServerBase):
    """A Flight server that tests ListActions."""
    def list_actions(self, context):
        yield ("action-1", "")
        # Deliberately yield a non-ActionType value so clients can test
        # server-side validation errors.
        yield "foo"
class CheckTicketFlightServer(FlightServerBase):
    """A Flight server that compares the given ticket to an expected value."""
    def __init__(self, expected_ticket):
        super(CheckTicketFlightServer, self).__init__()
        self.expected_ticket = expected_ticket
    def do_get(self, context, ticket):
        assert self.expected_ticket == ticket.ticket
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        table = pa.Table.from_arrays(data1, names=['a'])
        return flight.RecordBatchStream(table)
    def do_put(self, context, descriptor, reader):
        # NOTE(review): this signature lacks the `writer` argument the other
        # servers' do_put takes - confirm against the targeted pyarrow
        # Flight version before relying on it.
        self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
    """A Flight server that tries to return messages with differing schemas."""

    # the schema the stream *claims* to have
    schema = pa.schema([('a', pa.int32())])

    def do_get(self, context, ticket):
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        # BUGFIX: the sanity check compared the Python *lists* (which have
        # no .type attribute, raising AttributeError at request time);
        # compare the arrays' types instead.
        assert data1[0].type != data2[0].type
        table1 = pa.Table.from_arrays(data1, names=['a'])
        table2 = pa.Table.from_arrays(data2, names=['a'])
        assert table1.schema == self.schema
        # table2's schema intentionally mismatches the declared one.
        return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(FlightServerBase):
    """A Flight server that delays its responses to test timeouts."""

    def do_get(self, context, ticket):
        # Stream lazily so the delay happens while the client is reading.
        return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
                                      self.slow_stream())

    def do_action(self, context, action):
        # Sleep long enough that a sub-0.5s client timeout fires.
        time.sleep(0.5)
        return iter([])

    @staticmethod
    def slow_stream():
        # First table is delivered immediately ...
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        yield pa.Table.from_arrays(data1, names=['a'])
        # The second message should never get sent; the client should
        # cancel before we send this
        time.sleep(10)
        yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
    """A Flight server that uses all the Flight-specific errors."""

    def do_action(self, context, action):
        """Raise the Flight error class named by the action type."""
        error_classes = {
            "internal": flight.FlightInternalError,
            "timedout": flight.FlightTimedOutError,
            "cancel": flight.FlightCancelledError,
            "unauthenticated": flight.FlightUnauthenticatedError,
            "unauthorized": flight.FlightUnauthorizedError,
        }
        exc_class = error_classes.get(action.type)
        if exc_class is not None:
            raise exc_class("foo")
        raise NotImplementedError

    def list_flights(self, context, criteria):
        """Yield one valid FlightInfo, then fail mid-stream."""
        yield flight.FlightInfo(
            pa.schema([]),
            flight.FlightDescriptor.for_path('/foo'),
            [],
            -1, -1
        )
        raise flight.FlightInternalError("foo")
class HttpBasicServerAuthHandler(ServerAuthHandler):
    """An example implementation of HTTP basic authentication."""

    def __init__(self, creds):
        super(HttpBasicServerAuthHandler, self).__init__()
        # creds: mapping of username (bytes) -> password (bytes)
        self.creds = creds

    def authenticate(self, outgoing, incoming):
        # The client sends a serialized BasicAuth in one message.
        buf = incoming.read()
        auth = flight.BasicAuth.deserialize(buf)
        if auth.username not in self.creds:
            raise flight.FlightUnauthenticatedError("unknown user")
        if self.creds[auth.username] != auth.password:
            raise flight.FlightUnauthenticatedError("wrong password")
        # The username itself doubles as the session token.
        outgoing.write(tobytes(auth.username))

    def is_valid(self, token):
        # Called per-request: validate the token (here, the username).
        if not token:
            raise flight.FlightUnauthenticatedError("token not provided")
        if token not in self.creds:
            raise flight.FlightUnauthenticatedError("unknown user")
        return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
    """An example implementation of HTTP basic authentication."""

    def __init__(self, username, password):
        super(HttpBasicClientAuthHandler, self).__init__()
        self.basic_auth = flight.BasicAuth(username, password)
        self.token = None

    def authenticate(self, outgoing, incoming):
        """Send serialized credentials and remember the returned token."""
        outgoing.write(self.basic_auth.serialize())
        self.token = incoming.read()

    def get_token(self):
        """Return the token received during authenticate()."""
        return self.token
class TokenServerAuthHandler(ServerAuthHandler):
    """An example implementation of authentication via handshake."""

    def __init__(self, creds):
        super(TokenServerAuthHandler, self).__init__()
        # creds: mapping of username (bytes) -> password (bytes)
        self.creds = creds

    def authenticate(self, outgoing, incoming):
        # Two-message handshake: username first, then password.
        username = incoming.read()
        password = incoming.read()
        if username in self.creds and self.creds[username] == password:
            # Issue a (toy) token: base64 of 'secret:<username>'.
            outgoing.write(base64.b64encode(b'secret:' + username))
        else:
            raise flight.FlightUnauthenticatedError(
                "invalid username/password")

    def is_valid(self, token):
        # Decode the token and strip the 'secret:' prefix to recover
        # the peer identity returned by context.peer_identity().
        token = base64.b64decode(token)
        if not token.startswith(b'secret:'):
            raise flight.FlightUnauthenticatedError("invalid token")
        return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
    """An example implementation of authentication via handshake."""

    def __init__(self, username, password):
        super(TokenClientAuthHandler, self).__init__()
        self.username = username
        self.password = password
        self.token = b''

    def authenticate(self, outgoing, incoming):
        """Send username then password; store the token the server issues."""
        for credential in (self.username, self.password):
            outgoing.write(credential)
        self.token = incoming.read()

    def get_token(self):
        return self.token
@contextlib.contextmanager
def flight_server(server_base, *args, **kwargs):
    """Spawn a Flight server on a free port, shutting it down when done."""
    # Pop harness-specific options; the remaining kwargs are forwarded
    # verbatim to the server constructor.
    auth_handler = kwargs.pop('auth_handler', None)
    tls_certificates = kwargs.pop('tls_certificates', None)
    location = kwargs.pop('location', None)
    try_connect = kwargs.pop('try_connect', True)
    connect_args = kwargs.pop('connect_args', {})
    if location is None:
        # Find a free port
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with contextlib.closing(sock) as sock:
            sock.bind(('', 0))
            # NOTE(review): SO_REUSEADDR is set *after* bind(), so it has no
            # effect on this bind; harmless, since the socket only exists to
            # discover a free port. The probe is also inherently racy:
            # another process could claim the port before the server does.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            port = sock.getsockname()[1]
        ctor = flight.Location.for_grpc_tcp
        if tls_certificates:
            ctor = flight.Location.for_grpc_tls
        location = ctor("localhost", port)
    else:
        port = None
    ctor_kwargs = kwargs
    server_instance = server_base(*args, **ctor_kwargs)
    # The server instance needs to be initialized before shutdown()
    # can be called
    server_instance.init(location,
                         auth_handler=auth_handler,
                         tls_certificates=tls_certificates)

    def _server_thread():
        server_instance.run()

    # Daemon thread: don't block interpreter exit if shutdown hangs.
    thread = threading.Thread(target=_server_thread, daemon=True)
    thread.start()
    # Wait for server to start
    if try_connect:
        deadline = time.time() + 5.0
        client = flight.FlightClient.connect(location, **connect_args)
        while True:
            try:
                list(client.list_flights())
            except Exception as e:
                # Retry only on connection failures until the deadline;
                # any other error is treated as "server is up" and the
                # test itself will surface it.
                if 'Connect Failed' in str(e):
                    if time.time() < deadline:
                        time.sleep(0.025)
                        continue
                    else:
                        raise
            break
    try:
        yield location
    finally:
        server_instance.shutdown()
        thread.join(3.0)
def test_flight_do_get_ints():
    """Try a simple do_get call."""
    table = simple_ints_table()
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        # The server returns the same constant table for the b'ints' ticket.
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        assert data.equals(table)
@pytest.mark.pandas
def test_do_get_ints_pandas():
    """Try a simple do_get call, materialized as a pandas DataFrame."""
    table = simple_ints_table()
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        data = client.do_get(flight.Ticket(b'ints')).read_pandas()
        # Compare column values, since read_pandas() returns a DataFrame.
        assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
    """Try a do_get call returning dictionary-encoded data."""
    table = simple_dicts_table()
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        data = client.do_get(flight.Ticket(b'dicts')).read_all()
        assert data.equals(table)
def test_flight_do_get_ticket():
    """Make sure Tickets get passed to the server."""
    data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
    table = pa.Table.from_arrays(data1, names=['a'])
    with flight_server(
            CheckTicketFlightServer,
            expected_ticket=b'the-ticket',
    ) as server_location:
        client = flight.FlightClient.connect(server_location)
        # The server asserts it received exactly b'the-ticket'.
        data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
        assert data.equals(table)
def test_flight_get_info():
    """Make sure FlightEndpoint accepts string and object URIs."""
    with flight_server(GetInfoFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
        # -1 means "unknown" for both counters (see GetInfoFlightServer).
        assert info.total_records == -1
        assert info.total_bytes == -1
        assert info.schema == pa.schema([('a', pa.int32())])
        assert len(info.endpoints) == 2
        assert len(info.endpoints[0].locations) == 1
        assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
        assert info.endpoints[1].locations[0] == \
            flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
    """Make sure GetSchema returns correct schema."""
    with flight_server(GetInfoFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        # get_schema() returns a SchemaResult with only the schema.
        info = client.get_schema(flight.FlightDescriptor.for_command(b''))
        assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
    """Make sure the return type of ListActions is validated."""
    # ARROW-6392
    # The error server yields a non-ActionType value ("foo") and must fail.
    with flight_server(ListActionsErrorFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        with pytest.raises(pa.ArrowException, match=".*unknown error.*"):
            list(client.list_actions())
    with flight_server(ListActionsFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        assert list(client.list_actions()) == \
            ListActionsFlightServer.expected_actions()
def test_get_port():
    """Make sure port() works."""
    server = GetInfoFlightServer()
    # Port 0 asks the OS for an ephemeral port; port() must report it.
    server.init("grpc://localhost:0")
    try:
        assert server.port() > 0
    finally:
        server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
                    reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
    """Try a simple do_get call over a Unix domain socket."""
    with tempfile.NamedTemporaryFile() as sock:
        # Close (and so delete) the file: we only want its unique path
        # for the domain socket; gRPC creates the socket itself.
        sock.close()
        location = flight.Location.for_grpc_unix(sock.name)
        with flight_server(ConstantFlightServer,
                           location=location) as server_location:
            client = flight.FlightClient.connect(server_location)

            reader = client.do_get(flight.Ticket(b'ints'))
            table = simple_ints_table()
            assert reader.schema.equals(table.schema)
            data = reader.read_all()
            assert data.equals(table)

            reader = client.do_get(flight.Ticket(b'dicts'))
            table = simple_dicts_table()
            assert reader.schema.equals(table.schema)
            data = reader.read_all()
            assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
    """Try sending/receiving a large message via Flight.

    See ARROW-4421: by default, gRPC won't allow us to send messages >
    4MiB in size.
    """
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024 * 1024))
    ], names=['a'])
    with flight_server(EchoFlightServer,
                       expected_schema=data.schema) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
                                  data.schema)
        # Write a single giant chunk
        writer.write_table(data, 10 * 1024 * 1024)
        writer.close()
        # Echo server returns what we uploaded; must survive the round trip.
        result = client.do_get(flight.Ticket(b'')).read_all()
        assert result.equals(data)
def test_flight_generator_stream():
    """Try downloading a flight of RecordBatches in a GeneratorStream."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=['a'])
    with flight_server(EchoStreamFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
                                  data.schema)
        writer.write_table(data)
        writer.close()
        # The echo-stream server serves the upload back as a GeneratorStream.
        result = client.do_get(flight.Ticket(b'')).read_all()
        assert result.equals(data)
def test_flight_invalid_generator_stream():
    """Try streaming data with mismatched schemas."""
    with flight_server(InvalidStreamFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        # The server sends an int32 table then a float64 table; reading
        # the stream must fail rather than silently mixing schemas.
        with pytest.raises(pa.ArrowException):
            client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
    """Make sure timeouts fire on slow requests."""
    # Do this in a separate thread so that if it fails, we don't hang
    # the entire test process
    with flight_server(SlowFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("", b"")
        # SlowFlightServer.do_action sleeps 0.5s; a 0.2s deadline must fire.
        options = flight.FlightCallOptions(timeout=0.2)
        # gRPC error messages change based on version, so don't look
        # for a particular error
        with pytest.raises(flight.FlightTimedOutError):
            list(client.do_action(action, options=options))
def test_timeout_passes():
    """Make sure timeouts do not fire on fast requests."""
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        # A generous deadline must not interfere with a fast request.
        options = flight.FlightCallOptions(timeout=5.0)
        client.do_get(flight.Ticket(b'ints'), options=options).read_all()
# Shared handler fixtures for the auth tests below; each knows exactly
# one valid credential pair.
basic_auth_handler = HttpBasicServerAuthHandler(creds={
    b"test": b"p4ssw0rd",
})

token_auth_handler = TokenServerAuthHandler(creds={
    b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
    """Test that auth fails when not authenticated."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=basic_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        # No client.authenticate() call, so the server must reject us.
        with pytest.raises(flight.FlightUnauthenticatedError,
                           match=".*unauthenticated.*"):
            list(client.do_action(action))
def test_http_basic_auth():
    """Test a Python implementation of HTTP basic authentication."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=basic_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
        # who-am-i echoes the peer identity the server derived from auth.
        identity = next(client.do_action(action))
        assert identity.body.to_pybytes() == b'test'
def test_http_basic_auth_invalid_password():
    """Test that auth fails with the wrong password."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=basic_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        # The handshake itself must be rejected with the server's message.
        with pytest.raises(flight.FlightUnauthenticatedError,
                           match=".*wrong password.*"):
            client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
            next(client.do_action(action))
def test_token_auth():
    """Test an auth mechanism that uses a handshake."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=token_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
        # The token round-trips and decodes back to the username.
        identity = next(client.do_action(action))
        assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
    """Test an auth mechanism that uses a handshake."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=token_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        # Wrong password: the handshake must fail before any call is made.
        with pytest.raises(flight.FlightUnauthenticatedError):
            client.authenticate(TokenClientAuthHandler('test', 'wrong'))
def test_location_invalid():
    """Test constructing invalid URIs."""
    # Both client connect and server init must reject an unparseable URI.
    with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
        flight.FlightClient.connect("%")

    server = ConstantFlightServer()
    with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
        server.init("%")
def test_location_unknown_scheme():
    """Test creating locations for unknown schemes."""
    # Unknown schemes are accepted verbatim; the URI is stored as bytes.
    assert flight.Location("s3://foo").uri == b"s3://foo"
    assert flight.Location("https://example.com/bar.parquet").uri == \
        b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
    """Make sure clients cannot connect when cert verification fails."""
    certs = example_tls_certs()

    with flight_server(
            ConstantFlightServer, tls_certificates=certs["certificates"],
            connect_args=dict(tls_root_certs=certs["root_cert"]),
    ) as server_location:
        # Ensure client doesn't connect when certificate verification
        # fails (this is a slow test since gRPC does retry a few times)
        # Note: no tls_root_certs passed here, so verification must fail.
        client = flight.FlightClient.connect(server_location)

        # gRPC error messages change based on version, so don't look
        # for a particular error
        with pytest.raises(flight.FlightUnavailableError):
            client.do_get(flight.Ticket(b'ints'))
@pytest.mark.requires_testing_data
def test_tls_do_get():
    """Try a simple do_get call over TLS."""
    table = simple_ints_table()
    certs = example_tls_certs()

    with flight_server(
            ConstantFlightServer, tls_certificates=certs["certificates"],
            connect_args=dict(tls_root_certs=certs["root_cert"]),
    ) as server_location:
        # Trusting the test root cert lets verification succeed.
        client = flight.FlightClient.connect(
            server_location, tls_root_certs=certs["root_cert"])
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
    """Check that incorrectly overriding the hostname fails."""
    certs = example_tls_certs()

    with flight_server(
            ConstantFlightServer, tls_certificates=certs["certificates"],
            connect_args=dict(tls_root_certs=certs["root_cert"]),
    ) as server_location:
        # The override makes the cert's hostname check fail on purpose.
        client = flight.FlightClient.connect(
            server_location, tls_root_certs=certs["root_cert"],
            override_hostname="fakehostname")
        with pytest.raises(flight.FlightUnavailableError):
            client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
    """Try a simple do_get call with metadata."""
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])

    batches = []
    with flight_server(MetadataFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b''))
        idx = 0
        # read_chunk() raises StopIteration at end-of-stream, hence the
        # explicit while/try loop instead of plain iteration.
        while True:
            try:
                batch, metadata = reader.read_chunk()
                batches.append(batch)
                # Each chunk's metadata carries its index as little-endian
                # int32; verify it matches our own count.
                server_idx, = struct.unpack('<i', metadata.to_pybytes())
                assert idx == server_idx
                idx += 1
            except StopIteration:
                break
        data = pa.Table.from_batches(batches)
        assert data.equals(table)
def test_flight_do_put_metadata():
    """Try a simple do_put call with metadata."""
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])

    with flight_server(MetadataFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, metadata_reader = client.do_put(
            flight.FlightDescriptor.for_path(''),
            table.schema)
        with writer:
            for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
                # Attach the batch index as metadata; the server echoes it
                # back so we can verify each message round-trips in order.
                metadata = struct.pack('<i', idx)
                writer.write_with_metadata(batch, metadata)
                buf = metadata_reader.read()
                assert buf is not None
                server_idx, = struct.unpack('<i', buf.to_pybytes())
                assert idx == server_idx
@pytest.mark.slow
def test_cancel_do_get():
    """Test canceling a DoGet operation on the client side."""
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b'ints'))
        reader.cancel()
        # After cancel, further reads must raise the cancellation error.
        with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
            reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
    """Test canceling a DoGet operation from another thread."""
    with flight_server(SlowFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b'ints'))

        # Events coordinate the reader thread with the main thread:
        # main cancels only after the first message was read, and the
        # reader retries only after the cancel happened.
        read_first_message = threading.Event()
        stream_canceled = threading.Event()
        result_lock = threading.Lock()
        raised_proper_exception = threading.Event()

        def block_read():
            reader.read_chunk()
            read_first_message.set()
            stream_canceled.wait(timeout=5)
            try:
                reader.read_chunk()
            except flight.FlightCancelledError:
                with result_lock:
                    raised_proper_exception.set()

        thread = threading.Thread(target=block_read, daemon=True)
        thread.start()
        read_first_message.wait(timeout=5)
        reader.cancel()
        stream_canceled.set()
        thread.join(timeout=1)

        # The lock makes the flag handoff from the worker thread visible.
        with result_lock:
            assert raised_proper_exception.is_set()
def test_roundtrip_types():
    """Make sure serializable types round-trip."""
    # Each Flight value type must survive serialize() -> deserialize().
    ticket = flight.Ticket("foo")
    assert ticket == flight.Ticket.deserialize(ticket.serialize())

    desc = flight.FlightDescriptor.for_command("test")
    assert desc == flight.FlightDescriptor.deserialize(desc.serialize())

    desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
    assert desc == flight.FlightDescriptor.deserialize(desc.serialize())

    info = flight.FlightInfo(
        pa.schema([('a', pa.int32())]),
        desc,
        [
            flight.FlightEndpoint(b'', ['grpc://test']),
            flight.FlightEndpoint(
                b'',
                [flight.Location.for_grpc_tcp('localhost', 5005)],
            ),
        ],
        -1,
        -1,
    )
    info2 = flight.FlightInfo.deserialize(info.serialize())
    # FlightInfo has no __eq__, so compare its fields individually.
    assert info.schema == info2.schema
    assert info.descriptor == info2.descriptor
    assert info.total_bytes == info2.total_bytes
    assert info.total_records == info2.total_records
    assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
    """Ensure that Flight errors propagate from server to client."""
    with flight_server(ErrorFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)

        # Each action name maps to the matching Flight exception class
        # (see ErrorFlightServer.do_action).
        with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
            list(client.do_action(flight.Action("internal", b"")))
        with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
            list(client.do_action(flight.Action("timedout", b"")))
        with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
            list(client.do_action(flight.Action("cancel", b"")))
        with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
            list(client.do_action(flight.Action("unauthenticated", b"")))
        with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
            list(client.do_action(flight.Action("unauthorized", b"")))
        # Errors raised mid-stream must also propagate.
        with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
            list(client.list_flights())
def test_do_put_independent_read_write():
    """Ensure that separate threads can read/write on a DoPut."""
    # ARROW-6063: previously this would cause gRPC to abort when the
    # writer was closed (due to simultaneous reads), or would hang
    # forever.
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])

    with flight_server(MetadataFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, metadata_reader = client.do_put(
            flight.FlightDescriptor.for_path(''),
            table.schema)

        # Single-element list so the closure can mutate the counter.
        count = [0]

        def _reader_thread():
            # read() returns None once the server ends the call.
            while metadata_reader.read() is not None:
                count[0] += 1

        thread = threading.Thread(target=_reader_thread)
        thread.start()

        batches = table.to_batches(max_chunksize=1)
        with writer:
            for idx, batch in enumerate(batches):
                metadata = struct.pack('<i', idx)
                writer.write_with_metadata(batch, metadata)
            # Causes the server to stop writing and end the call
            writer.done_writing()
            # Thus reader thread will break out of loop
            thread.join()
        # writer.close() won't segfault since reader thread has
        # stopped
        assert count[0] == len(batches)
|
base.py | import logging
try:
from Queue import Queue # PY2
except ImportError:
from queue import Queue # PY3
from threading import Thread
try:
from urlparse import urljoin # PY2
except ImportError:
from urllib.parse import urljoin # PY3
from six import iteritems
from six.moves import range
from .utils import bs, requests_get, get_all_sites, get_list_filters
ALL_SITES = get_all_sites() # All the Craiglist sites
RESULTS_PER_REQUEST = 100 # Craigslist returns 100 results per request
class CraigslistBase(object):
""" Base class for all Craiglist wrappers. """
url_templates = {
'base': 'http://%(site)s.craigslist.org',
'no_area': 'http://%(site)s.craigslist.org/search/%(category)s',
'area': 'http://%(site)s.craigslist.org/search/%(area)s/%(category)s'
}
default_site = 'sfbay'
default_category = None
base_filters = {
'query': {'url_key': 'query', 'value': None},
'search_titles': {'url_key': 'srchType', 'value': 'T'},
'has_image': {'url_key': 'hasPic', 'value': 1},
'posted_today': {'url_key': 'postedToday', 'value': 1},
'bundle_duplicates': {'url_key': 'bundleDuplicates', 'value': 1},
'search_distance': {'url_key': 'search_distance', 'value': None},
'zip_code': {'url_key': 'postal', 'value': None},
}
extra_filters = {}
__list_filters = {} # Cache for list filters requested by URL
# Set to True to subclass defines the customize_results() method
custom_result_fields = False
sort_by_options = {
'newest': 'date',
'price_asc': 'priceasc',
'price_desc': 'pricedsc',
}
def __init__(self, site=None, area=None, category=None, filters=None,
log_level=logging.WARNING):
# Logging
self.set_logger(log_level, init=True)
self.site = site or self.default_site
if self.site not in ALL_SITES:
msg = "'%s' is not a valid site" % self.site
self.logger.error(msg)
raise ValueError(msg)
if area:
if not self.is_valid_area(area):
msg = "'%s' is not a valid area for site '%s'" % (area, site)
self.logger.error(msg)
raise ValueError(msg)
self.area = area
self.category = category or self.default_category
url_template = self.url_templates['area' if area else 'no_area']
self.url = url_template % {'site': self.site, 'area': self.area,
'category': self.category}
self.filters = self.get_filters(filters)
def get_filters(self, filters):
"""Parses filters passed by the user into GET parameters."""
list_filters = self.get_list_filters(self.url)
# If a search has few results, results for "similar listings" will be
# included. The solution is a bit counter-intuitive, but to force this
# not to happen, we set searchNearby=True, but not pass any
# nearbyArea=X, thus showing no similar listings.
parsed_filters = {'searchNearby': 1}
for key, value in iteritems((filters or {})):
try:
filter_ = (self.base_filters.get(key) or
self.extra_filters.get(key) or
list_filters[key])
if filter_['value'] is None:
parsed_filters[filter_['url_key']] = value
elif isinstance(filter_['value'], list):
valid_options = filter_['value']
if not hasattr(value, '__iter__'):
value = [value] # Force to list
options = []
for opt in value:
try:
options.append(valid_options.index(opt) + 1)
except ValueError:
self.logger.warning(
"'%s' is not a valid option for %s"
% (opt, key)
)
parsed_filters[filter_['url_key']] = options
elif value: # Don't add filter if ...=False
parsed_filters[filter_['url_key']] = filter_['value']
except KeyError:
self.logger.warning("'%s' is not a valid filter", key)
return parsed_filters
def set_logger(self, log_level, init=False):
if init:
self.logger = logging.getLogger('python-craiglist')
self.handler = logging.StreamHandler()
self.logger.addHandler(self.handler)
self.logger.setLevel(log_level)
self.handler.setLevel(log_level)
def is_valid_area(self, area):
base_url = self.url_templates['base']
response = requests_get(base_url % {'site': self.site},
logger=self.logger)
soup = bs(response.content)
sublinks = soup.find('ul', {'class': 'sublinks'})
return sublinks and sublinks.find('a', text=area) is not None
def get_results_approx_count(self, soup=None):
"""
Gets (approx) amount of results to be returned by `get_results`.
Note that this number could be not exactly the same as the actual
len of results returned (although from my tests usually within +/-10).
Also note that this will make an extra request to Craigslist (if `soup`
is not provided).
"""
if soup is None:
response = requests_get(self.url, params=self.filters,
logger=self.logger)
self.logger.info('GET %s', response.url)
self.logger.info('Response code: %s', response.status_code)
response.raise_for_status() # Something failed?
soup = bs(response.content)
totalcount = soup.find('span', {'class': 'totalcount'})
return int(totalcount.text) if totalcount else None
def get_results(self, limit=None, start=0, sort_by=None, geotagged=False,
include_details=False):
"""
Gets results from Craigslist based on the specified filters.
If geotagged=True, the results will include the (lat, lng) in the
'geotag' attrib (this will make the process a little bit longer).
"""
if sort_by:
try:
self.filters['sort'] = self.sort_by_options[sort_by]
except KeyError:
msg = ("'%s' is not a valid sort_by option, "
"use: 'newest', 'price_asc' or 'price_desc'" % sort_by)
self.logger.error(msg)
raise ValueError(msg)
total_so_far = start
results_yielded = 0
total = 0
while True:
self.filters['s'] = start
response = requests_get(self.url, params=self.filters,
logger=self.logger)
self.logger.info('GET %s', response.url)
self.logger.info('Response code: %s', response.status_code)
response.raise_for_status() # Something failed?
soup = bs(response.content)
if not total:
total = self.get_results_approx_count(soup=soup)
rows = soup.find('ul', {'class': 'rows'})
for row in rows.find_all('li', {'class': 'result-row'},
recursive=False):
if limit is not None and results_yielded >= limit:
break
self.logger.debug('Processing %s of %s results ...',
total_so_far + 1, total or '(undefined)')
yield self.process_row(row, geotagged, include_details)
results_yielded += 1
total_so_far += 1
if results_yielded == limit:
break
if (total_so_far - start) < RESULTS_PER_REQUEST:
break
start = total_so_far
def process_row(self, row, geotagged=False, include_details=False):
id = row.attrs['data-pid']
repost_of = row.attrs.get('data-repost-of')
link = row.find('a', {'class': 'hdrlnk'})
name = link.text
url = urljoin(self.url, link.attrs['href'])
time = row.find('time')
if time:
datetime = time.attrs['datetime']
else:
pl = row.find('span', {'class': 'pl'})
datetime = pl.text.split(':')[0].strip() if pl else None
price = row.find('span', {'class': 'result-price'})
where = row.find('span', {'class': 'result-hood'})
if where:
where = where.text.strip()[1:-1] # remove ()
tags_span = row.find('span', {'class': 'result-tags'})
tags = tags_span.text if tags_span else ''
result = {'id': id,
'repost_of': repost_of,
'name': name,
'url': url,
# NOTE: Keeping 'datetime' for backwards
# compatibility, use 'last_updated' instead.
'datetime': datetime,
'last_updated': datetime,
'price': price.text if price else None,
'where': where,
'has_image': 'pic' in tags,
'geotag': None,
# In very few cases, a posting will be included in the result
# list but it has already been deleted (or it has been
# deleted after the list was retrieved). In that case, this
# field will be marked as True. If you want to be extra
# careful, always check this field is False before using a
# result.
'deleted': False}
if geotagged or include_details:
detail_soup = self.fetch_content(result['url'])
if geotagged:
self.geotag_result(result, detail_soup)
if include_details:
self.include_details(result, detail_soup)
if self.custom_result_fields:
self.customize_result(result)
return result
def customize_result(self, result):
""" Adds custom/delete/alter fields to result. """
# Override in subclass to add category-specific fields.
# FYI: `attrs` will only be presented if include_details was True.
pass
def geotag_result(self, result, soup):
""" Adds (lat, lng) to result. """
self.logger.debug('Geotagging result ...')
map_ = soup.find('div', {'id': 'map'})
if map_:
result['geotag'] = (float(map_.attrs['data-latitude']),
float(map_.attrs['data-longitude']))
return result
def include_details(self, result, soup):
""" Adds description, images to result """
self.logger.debug('Adding details to result...')
body = soup.find('section', id='postingbody')
if not body:
# This should only happen when the posting has been deleted by its
# author.
result['deleted'] = True
return
# We need to massage the data a little bit because it might include
# some inner elements that we want to ignore.
body_text = (getattr(e, 'text', e) for e in body
if not getattr(e, 'attrs', None))
result['body'] = ''.join(body_text).strip()
# Add created time (in case it's different from last updated).
postinginfos = soup.find('div', {'class': 'postinginfos'})
for p in postinginfos.find_all('p'):
if 'posted' in p.text:
time = p.find('time')
if time:
# This date is in ISO format. I'm removing the T literal
# and the timezone to make it the same format as
# 'last_updated'.
created = time.attrs['datetime'].replace('T', ' ')
result['created'] = created.rsplit(':', 1)[0]
# Add images' urls.
image_tags = soup.find_all('img')
# If there's more than one picture, the first one will be repeated.
image_tags = image_tags[1:] if len(image_tags) > 1 else image_tags
images = []
for img in image_tags:
try:
img_link = img['src'].replace('50x50c', '600x450')
images.append(img_link)
except KeyError:
continue # Some posts contain empty <img> tags.
result['images'] = images
# Add list of attributes as unparsed strings. These values are then
# processed by `parse_attrs`, and are available to be post-processed
# by subclasses.
attrgroups = soup.find_all('p', {'class': 'attrgroup'})
attrs = []
for attrgroup in attrgroups:
for attr in attrgroup.find_all('span'):
attr_text = attr.text.strip()
if attr_text:
attrs.append(attr_text)
result['attrs'] = attrs
if attrs:
self.parse_attrs(result)
# If an address is included, add it to `address`.
mapaddress = soup.find('div', {'class': 'mapaddress'})
if mapaddress:
result['address'] = mapaddress.text
def parse_attrs(self, result):
"""Parses raw attributes into structured fields in the result dict."""
# Parse binary fields first by checking their presence.
attrs = set(attr.lower() for attr in result['attrs'])
for key, options in iteritems(self.extra_filters):
if options['value'] != 1:
continue # Filter is not binary
if options.get('attr', '') in attrs:
result[key] = True
# Values from list filters are sometimes shown as {filter}: {value}
# e.g. "transmission: automatic", although usually they are shown only
# with the {value}, e.g. "laundry in bldg". By stripping the content
# before the colon (if any) we reduce it to a single case.
attrs_after_colon = set(
attr.split(': ', 1)[-1] for attr in result['attrs'])
for key, options in iteritems(self.get_list_filters(self.url)):
for option in options['value']:
if option in attrs_after_colon:
result[key] = option
break
    def fetch_content(self, url):
        """Fetch `url` and return it parsed as BeautifulSoup, or None.

        None is returned for any non-OK HTTP response; the final URL and
        status code are logged either way.
        """
        response = requests_get(url, logger=self.logger)
        self.logger.info('GET %s', response.url)
        self.logger.info('Response code: %s', response.status_code)
        if response.ok:
            # NOTE(review): no explicit parser is passed to BeautifulSoup,
            # so it picks the best installed one — results can differ
            # between environments; confirm this is intentional.
            return bs(response.content)
        return None
def geotag_results(self, results, workers=8):
"""
Adds (lat, lng) to each result. This process is done using N threads,
where N is the amount of workers defined (default: 8).
"""
results = list(results)
queue = Queue()
for result in results:
queue.put(result)
def geotagger():
while not queue.empty():
self.logger.debug('%s results left to geotag ...',
queue.qsize())
self.geotag_result(queue.get())
queue.task_done()
threads = []
for _ in range(workers):
thread = Thread(target=geotagger)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
return results
    @classmethod
    def get_list_filters(cls, url):
        """Return the list filters for `url`, memoized on the class.

        The first call per URL fetches and caches the filters; later calls
        return the cached dict.
        """
        if cls.__list_filters.get(url) is None:
            # Inside this method the bare name `get_list_filters` resolves
            # to the module-level helper, not this classmethod (class
            # attributes are not in a method's name-lookup scope).
            cls.__list_filters[url] = get_list_filters(url)
        return cls.__list_filters[url]
@classmethod
def show_filters(cls, category=None):
print('Base filters:')
for key, options in iteritems(cls.base_filters):
value_as_str = '...' if options['value'] is None else 'True/False'
print('* %s = %s' % (key, value_as_str))
print('Section specific filters:')
for key, options in iteritems(cls.extra_filters):
value_as_str = '...' if options['value'] is None else 'True/False'
print('* %s = %s' % (key, value_as_str))
url = cls.url_templates['no_area'] % {
'site': cls.default_site,
'category': category or cls.default_category,
}
list_filters = cls.get_list_filters(url)
for key, options in iteritems(list_filters):
value_as_str = ', '.join([repr(opt) for opt in options['value']])
print('* %s = %s' % (key, value_as_str))
|
test_do_spaces.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import pickle
import threading
import warnings
from datetime import datetime
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.test import TestCase, override_settings
from django.utils.timezone import is_aware, utc
from storages.backends import do_spaces, s3boto3
try:
from django.utils.six.moves.urllib import parse as urlparse
except ImportError:
from urllib import parse as urlparse
try:
from unittest import mock
except ImportError: # Python 3.2 and below
import mock
class DigitalOceanSpacesTestCase(TestCase):
    """Base test case: builds a DigitalOceanSpacesStorage whose boto3
    connection is replaced by a MagicMock, so no network traffic occurs."""
    def setUp(self):
        self.storage = do_spaces.DigitalOceanSpacesStorage()
        # Swap the thread-local connection for a mock so bucket/object
        # operations can be asserted on instead of hitting Spaces.
        self.storage._connections.connection = mock.MagicMock()
class DigitalOceanSpacesStorageTests(DigitalOceanSpacesTestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
    def test_clean_name_windows(self):
        """
        Test that _clean_name converts Windows-style backslash separators
        to forward slashes.
        """
        path = self.storage._clean_name("path\\to\\somewhere")
        self.assertEqual(path, "path/to/somewhere")
    def test_pickle_with_bucket(self):
        """
        Test that the storage can be pickled with a bucket attached
        """
        # Ensure the bucket has been used
        self.storage.bucket
        self.assertIsNotNone(self.storage._bucket)
        # Can't pickle MagicMock, but you can't pickle a real Bucket object either
        p = pickle.dumps(self.storage)
        new_storage = pickle.loads(p)
        self.assertIsInstance(new_storage._connections, threading.local)
        # Put the mock connection back in
        new_storage._connections.connection = mock.MagicMock()
        # The cached bucket must not survive pickling; accessing .bucket
        # lazily re-creates it on the restored instance.
        self.assertIsNone(new_storage._bucket)
        new_storage.bucket
        self.assertIsNotNone(new_storage._bucket)
    def test_pickle_without_bucket(self):
        """
        Test that the storage can be pickled, without a bucket instance
        """
        # Can't pickle a threadlocal
        p = pickle.dumps(self.storage)
        new_storage = pickle.loads(p)
        # Unpickling must rebuild a fresh thread-local connection holder.
        self.assertIsInstance(new_storage._connections, threading.local)
    def test_storage_url_slashes(self):
        """
        Test URL generation against a custom domain.
        """
        self.storage.custom_domain = 'example.com'
        # We expect no leading slashes in the path,
        # and trailing slashes should be preserved.
        self.assertEqual(self.storage.url(''), 'https://example.com/')
        self.assertEqual(self.storage.url('path'), 'https://example.com/path')
        self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
        self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
        self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
    def test_storage_save(self):
        """
        Test saving a file
        """
        name = 'test_storage_save.txt'
        content = ContentFile('new content')
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        obj = self.storage.bucket.Object.return_value
        # ContentType is guessed from the '.txt' extension; the default
        # ACL and CacheControl come from the storage configuration.
        obj.upload_fileobj.assert_called_with(
            content,
            ExtraArgs={
                'ContentType': 'text/plain',
                'ACL': self.storage.default_acl,
                'CacheControl': 'max-age=86400',
            }
        )
    def test_storage_save_with_acl(self):
        """
        Test saving a file with user defined ACL.
        """
        name = 'test_storage_save.txt'
        content = ContentFile('new content')
        # Overriding default_acl must propagate into the upload ExtraArgs.
        self.storage.default_acl = 'private'
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            content,
            ExtraArgs={
                'ContentType': 'text/plain',
                'ACL': 'private',
                'CacheControl': 'max-age=86400',
            }
        )
    def test_content_type(self):
        """
        Test saving a file with a None content type.
        """
        name = 'test_image.jpg'
        content = ContentFile('data')
        # With content_type unset, the backend must fall back to guessing
        # from the file extension ('.jpg' -> 'image/jpeg').
        content.content_type = None
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            content,
            ExtraArgs={
                'ContentType': 'image/jpeg',
                'ACL': self.storage.default_acl,
                'CacheControl': 'max-age=86400',
            }
        )
    def test_storage_save_gzipped(self):
        """
        Test saving a gzipped file
        """
        name = 'test_storage_save.gz'
        content = ContentFile("I am gzip'd")
        self.storage.save(name, content)
        obj = self.storage.bucket.Object.return_value
        # A '.gz' name yields octet-stream + gzip Content-Encoding; the
        # original content object is uploaded as-is (not re-compressed).
        obj.upload_fileobj.assert_called_with(
            content,
            ExtraArgs={
                'ContentType': 'application/octet-stream',
                'ContentEncoding': 'gzip',
                'ACL': self.storage.default_acl,
                'CacheControl': 'max-age=86400',
            }
        )
    def test_storage_save_gzip(self):
        """
        Test saving a file with gzip enabled.
        """
        self.storage.gzip = True
        name = 'test_storage_save.css'
        content = ContentFile("I should be gzip'd")
        self.storage.save(name, content)
        obj = self.storage.bucket.Object.return_value
        # The storage wraps the content in a new compressed file object,
        # so only the ExtraArgs can be matched exactly (hence mock.ANY).
        obj.upload_fileobj.assert_called_with(
            mock.ANY,
            ExtraArgs={
                'ContentType': 'text/css',
                'ContentEncoding': 'gzip',
                'ACL': self.storage.default_acl,
                'CacheControl': 'max-age=86400',
            }
        )
        # Decompress what was actually uploaded to prove it round-trips.
        args, kwargs = obj.upload_fileobj.call_args
        content = args[0]
        zfile = gzip.GzipFile(mode='rb', fileobj=content)
        self.assertEqual(zfile.read(), b"I should be gzip'd")
    def test_storage_save_gzip_twice(self):
        """
        Test saving the same file content twice with gzip enabled.
        """
        # Given
        self.storage.gzip = True
        name = 'test_storage_save.css'
        content = ContentFile("I should be gzip'd")
        # When: reusing the same content object must still upload the full
        # payload the second time (regression for stream position reuse).
        self.storage.save(name, content)
        self.storage.save('test_storage_save_2.css', content)
        # Then
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            mock.ANY,
            ExtraArgs={
                'ContentType': 'text/css',
                'ContentEncoding': 'gzip',
                'ACL': self.storage.default_acl,
                'CacheControl': 'max-age=86400',
            }
        )
        # The second (most recent) upload must decompress to the original.
        args, kwargs = obj.upload_fileobj.call_args
        content = args[0]
        zfile = gzip.GzipFile(mode='rb', fileobj=content)
        self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
    def test_storage_open_write(self):
        """
        Test opening a file in write mode
        """
        name = 'test_open_for_writïng.txt'
        content = 'new content'
        # Set the encryption flag used for multipart uploads
        self.storage.encryption = True
        self.storage.reduced_redundancy = True
        self.storage.default_acl = 'public-read'
        file = self.storage.open(name, 'w')
        self.storage.bucket.Object.assert_called_with(name)
        obj = self.storage.bucket.Object.return_value
        # Set the name of the mock object
        obj.key = name
        file.write(content)
        # The first write must start a multipart upload carrying all the
        # storage-level settings configured above.
        obj.initiate_multipart_upload.assert_called_with(
            ACL='public-read',
            ContentType='text/plain',
            ServerSideEncryption='AES256',
            StorageClass='REDUCED_REDUNDANCY',
            CacheControl='max-age=86400'
        )
        # Save the internal file before closing
        multipart = obj.initiate_multipart_upload.return_value
        multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
        file.close()
        # close() must flush the buffered text as UTF-8 into part 1 and
        # complete the multipart upload with that part's ETag.
        multipart.Part.assert_called_with(1)
        part = multipart.Part.return_value
        part.upload.assert_called_with(Body=content.encode('utf-8'))
        multipart.complete.assert_called_once_with(
            MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
    def test_storage_write_beyond_buffer_size(self):
        """
        Test writing content that exceeds the buffer size
        """
        name = 'test_open_for_writïng_beyond_buffer_size.txt'
        # Set the encryption flag used for multipart uploads
        self.storage.encryption = True
        self.storage.reduced_redundancy = True
        self.storage.default_acl = 'public-read'
        file = self.storage.open(name, 'w')
        self.storage.bucket.Object.assert_called_with(name)
        obj = self.storage.bucket.Object.return_value
        # Set the name of the mock object
        obj.key = name
        # Initiate the multipart upload
        file.write('')
        obj.initiate_multipart_upload.assert_called_with(
            ACL='public-read',
            ContentType='text/plain',
            ServerSideEncryption='AES256',
            StorageClass='REDUCED_REDUNDANCY',
            CacheControl='max-age=86400',
        )
        multipart = obj.initiate_multipart_upload.return_value
        # Write content at least twice as long as the buffer size
        written_content = ''
        counter = 1
        while len(written_content) < 2 * file.buffer_size:
            content = 'hello, aws {counter}\n'.format(counter=counter)
            # Write more than just a few bytes in each iteration to keep the
            # test reasonably fast
            content += '*' * int(file.buffer_size / 10)
            file.write(content)
            written_content += content
            counter += 1
        # Save the internal file before closing
        multipart.parts.all.return_value = [
            mock.MagicMock(e_tag='123', part_number=1),
            mock.MagicMock(e_tag='456', part_number=2)
        ]
        file.close()
        # Exceeding the buffer must have produced exactly two parts ...
        self.assertListEqual(
            multipart.Part.call_args_list,
            [mock.call(1), mock.call(2)]
        )
        # ... whose concatenated bodies reproduce everything written ...
        part = multipart.Part.return_value
        uploaded_content = ''.join(
            (args_list[1]['Body'].decode('utf-8')
             for args_list in part.upload.call_args_list)
        )
        self.assertEqual(uploaded_content, written_content)
        # ... and the upload is completed with both ETags in order.
        multipart.complete.assert_called_once_with(
            MultipartUpload={'Parts': [
                {'ETag': '123', 'PartNumber': 1},
                {'ETag': '456', 'PartNumber': 2},
            ]}
        )
    def test_auto_creating_bucket(self):
        """A 404 from head_bucket triggers creation in the client's region."""
        self.storage.auto_create_bucket = True
        Bucket = mock.MagicMock()
        self.storage._connections.connection.Bucket.return_value = Bucket
        self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
        # Simulate "bucket does not exist" via a 404 ClientError.
        Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
            'ResponseMetadata': {'HTTPStatusCode': 404}},
            'head_bucket')
        self.storage._get_or_create_bucket('testbucketname')
        Bucket.create.assert_called_once_with(
            ACL='public-read',
            CreateBucketConfiguration={
                'LocationConstraint': 'sa-east-1',
            }
        )
    def test_auto_creating_bucket_with_acl(self):
        """An explicit bucket_acl is applied when auto-creating the bucket."""
        self.storage.auto_create_bucket = True
        self.storage.bucket_acl = 'public-read'
        Bucket = mock.MagicMock()
        self.storage._connections.connection.Bucket.return_value = Bucket
        self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
        # Simulate "bucket does not exist" via a 404 ClientError.
        Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
            'ResponseMetadata': {'HTTPStatusCode': 404}},
            'head_bucket')
        self.storage._get_or_create_bucket('testbucketname')
        Bucket.create.assert_called_once_with(
            ACL='public-read',
            CreateBucketConfiguration={
                'LocationConstraint': 'sa-east-1',
            }
        )
    def test_storage_exists(self):
        """exists() issues a head_object call and is truthy on success."""
        self.assertTrue(self.storage.exists("file.txt"))
        self.storage.connection.meta.client.head_object.assert_called_with(
            Bucket=self.storage.bucket_name,
            Key="file.txt",
        )
    def test_storage_exists_false(self):
        """A 404 ClientError from head_object means the key does not exist."""
        self.storage.connection.meta.client.head_object.side_effect = ClientError(
            {'Error': {'Code': '404', 'Message': 'Not Found'}},
            'HeadObject',
        )
        self.assertFalse(self.storage.exists("file.txt"))
        self.storage.connection.meta.client.head_object.assert_called_with(
            Bucket=self.storage.bucket_name,
            Key='file.txt',
        )
def test_storage_exists_doesnt_create_bucket(self):
with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
self.storage.exists('file.txt')
self.assertFalse(method.called)
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
    def test_storage_listdir_base(self):
        """listdir('') splits paginated results into top-level dirs/files."""
        # Files:
        #   some/path/1.txt
        #   2.txt
        #   other/path/3.txt
        #   4.txt
        pages = [
            {
                'CommonPrefixes': [
                    {'Prefix': 'some'},
                    {'Prefix': 'other'},
                ],
                'Contents': [
                    {'Key': '2.txt'},
                    {'Key': '4.txt'},
                ],
            },
        ]
        paginator = mock.MagicMock()
        paginator.paginate.return_value = pages
        self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
        dirs, files = self.storage.listdir('')
        # Delimiter='/' is what makes S3 report "directories" as prefixes.
        paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='')
        self.assertEqual(dirs, ['some', 'other'])
        self.assertEqual(files, ['2.txt', '4.txt'])
    def test_storage_listdir_subdir(self):
        """listdir('some/') strips the prefix from returned names."""
        # Files:
        #   some/path/1.txt
        #   some/2.txt
        pages = [
            {
                'CommonPrefixes': [
                    {'Prefix': 'some/path'},
                ],
                'Contents': [
                    {'Key': 'some/2.txt'},
                ],
            },
        ]
        paginator = mock.MagicMock()
        paginator.paginate.return_value = pages
        self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
        dirs, files = self.storage.listdir('some/')
        paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='some/')
        self.assertEqual(dirs, ['path'])
        self.assertEqual(files, ['2.txt'])
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
    def test_storage_mtime(self):
        """Run the mtime assertions under both USE_TZ settings."""
        # Test both USE_TZ cases
        for use_tz in (True, False):
            with self.settings(USE_TZ=use_tz):
                self._test_storage_mtime(use_tz)
    def _test_storage_mtime(self, use_tz):
        """Assert modified_time() is naive and get_modified_time() follows
        USE_TZ.

        NOTE(review): the `use_tz` parameter is unused — the method reads
        settings.USE_TZ directly, which the caller has overridden to the
        same value, so behavior is unchanged; consider dropping the param.
        """
        obj = self.storage.bucket.Object.return_value
        obj.last_modified = datetime.now(utc)
        name = 'file.txt'
        self.assertFalse(
            is_aware(self.storage.modified_time(name)),
            'Naive datetime object expected from modified_time()'
        )
        self.assertIs(
            settings.USE_TZ,
            is_aware(self.storage.get_modified_time(name)),
            '%s datetime object expected from get_modified_time() when USE_TZ=%s' % (
                ('Naive', 'Aware')[settings.USE_TZ],
                settings.USE_TZ
            )
        )
    def test_storage_url(self):
        """url() delegates to generate_presigned_url, honoring `expire`."""
        name = 'test_storage_size.txt'
        url = 'http://aws.amazon.com/%s' % name
        self.storage.bucket.meta.client.generate_presigned_url.return_value = url
        self.storage.bucket.name = 'bucket'
        # Default expiry comes from storage.querystring_expire ...
        self.assertEqual(self.storage.url(name), url)
        self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
            'get_object',
            Params={'Bucket': self.storage.bucket.name, 'Key': name},
            ExpiresIn=self.storage.querystring_expire
        )
        # ... and an explicit expire= overrides it.
        custom_expire = 123
        self.assertEqual(self.storage.url(name, expire=custom_expire), url)
        self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
            'get_object',
            Params={'Bucket': self.storage.bucket.name, 'Key': name},
            ExpiresIn=custom_expire
        )
    def test_generated_url_is_encoded(self):
        """Custom-domain URLs are percent-encoded and never presigned."""
        self.storage.custom_domain = "mock.cloudfront.net"
        filename = "whacky & filename.mp4"
        url = self.storage.url(filename)
        parsed_url = urlparse.urlparse(url)
        self.assertEqual(parsed_url.path,
                         "/whacky%20%26%20filename.mp4")
        # With a custom domain the signing client must not be touched.
        self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
    def test_special_characters(self):
        """Non-ASCII names are stored raw but percent-encoded in URLs."""
        self.storage.custom_domain = "mock.cloudfront.net"
        name = "ãlöhâ.jpg"
        content = ContentFile('new content')
        self.storage.save(name, content)
        # The S3 key keeps the original (unencoded) unicode name.
        self.storage.bucket.Object.assert_called_once_with(name)
        url = self.storage.url(name)
        parsed_url = urlparse.urlparse(url)
        self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
    def test_strip_signing_parameters(self):
        """Both v4 (X-Amz-*) and v2 (expires/signature) query params are
        stripped from presigned URLs."""
        expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
        self.assertEqual(self.storage._strip_signing_parameters(
            '%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
        self.assertEqual(self.storage._strip_signing_parameters(
            '%s?expires=12345678&signature=Signature' % expected), expected)
    # NOTE(review): this skipIf condition is always False — `threading` is
    # imported unconditionally at module scope — so the guard is vacuous.
    @skipIf(threading is None, 'Test requires threading')
    def test_connection_threading(self):
        """Each thread must get its own boto3 connection object."""
        connections = []
        def thread_storage_connection():
            connections.append(self.storage.connection)
        for x in range(2):
            t = threading.Thread(target=thread_storage_connection)
            t.start()
            t.join()
        # Connection for each thread needs to be unique
        self.assertIsNot(connections[0], connections[1])
    def test_location_leading_slash(self):
        """A leading-slash location raises ImproperlyConfigured."""
        msg = (
            "S3Boto3Storage.location cannot begin with a leading slash. "
            "Found '/'. Use '' instead."
        )
        # NOTE(review): unittest's assertRaises(msg=...) sets the *failure*
        # message; it does not match the exception text. Use
        # assertRaisesMessage / assertRaisesRegex if matching is intended.
        with self.assertRaises(ImproperlyConfigured, msg=msg):
            s3boto3.S3Boto3Storage(location='/')
    def test_deprecated_acl(self):
        """Constructing with acl= emits exactly one DeprecationWarning."""
        # AWS_DEFAULT_ACL=None silences the unrelated default-ACL warning
        # so only the acl= deprecation is captured.
        with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
            s3boto3.S3Boto3Storage(acl='private')
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        message = (
            "The acl argument of S3Boto3Storage is deprecated. Use argument "
            "default_acl or setting AWS_DEFAULT_ACL instead. The acl argument "
            "will be removed in version 2.0."
        )
        assert str(w[-1].message) == message
    def test_deprecated_bucket(self):
        """Constructing with bucket= emits exactly one DeprecationWarning."""
        # AWS_DEFAULT_ACL=None silences the unrelated default-ACL warning
        # so only the bucket= deprecation is captured.
        with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
            s3boto3.S3Boto3Storage(bucket='django')
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        message = (
            "The bucket argument of S3Boto3Storage is deprecated. Use argument "
            "bucket_name or setting AWS_STORAGE_BUCKET_NAME instead. The bucket "
            "argument will be removed in version 2.0."
        )
        assert str(w[-1].message) == message
    def test_deprecated_default_acl(self):
        """Leaving AWS_DEFAULT_ACL unset warns about the insecure default."""
        with warnings.catch_warnings(record=True) as w:
            s3boto3.S3Boto3Storage()
        assert len(w) == 1
        message = (
            "The default behavior of S3Boto3Storage is insecure and will change "
            "in django-storages 2.0. By default files and new buckets are saved "
            "with an ACL of 'public-read' (globally publicly readable). Version 2.0 will "
            "default to using the bucket's ACL. To opt into the new behavior set "
            "AWS_DEFAULT_ACL = None, otherwise to silence this warning explicitly "
            "set AWS_DEFAULT_ACL."
        )
        assert str(w[-1].message) == message
    def test_deprecated_default_acl_override_class_variable(self):
        """Subclasses that pin default_acl must not trigger the warning."""
        class MyStorage(s3boto3.S3Boto3Storage):
            default_acl = "private"
        with warnings.catch_warnings(record=True) as w:
            MyStorage()
        assert len(w) == 0
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import KOINON_ELECTRUM_VERSION
from electrum.network import Network
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
class StatusBarButton(QPushButton):
    """Flat, fixed-size icon button for the status bar that runs `func`
    on click or on the Return key."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Trigger the action on Return as well; all other keys fall
        # through to the default QPushButton handling.
        if e.key() == Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the main wallet window: tabs, shortcuts, signal wiring and
        network callback registration for the given `wallet`."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.setup_exception_hook()
        self.network = gui_object.daemon.network # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # Plain-state flags consumed by the rest of the window's methods.
        self.cleaned_up = False
        self.is_max = False
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        # Fall back to the default base unit if the stored decimal point
        # does not map to a known unit name.
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))
        self.completions = QStringListModel()
        # Build all tabs; History/Send/Receive are always shown, the rest
        # are optional and toggled via config keys below.
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(QIcon(":icons/electrum.png"))
        self.init_menubar()
        # A weak proxy avoids the shortcut lambdas keeping `tabs` (and the
        # window) alive after close.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        # `i=i` binds the loop variable eagerly (late-binding closure fix).
        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)
        # update fee slider in case we missed the callback
        self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
    def on_history(self, b):
        """Network-thread callback for 'on_history': invalidate the coin
        price cache and hand off to the GUI thread via a Qt signal."""
        self.wallet.clear_coin_price_cache()
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        # Install the crash-reporter hook for this window.
        Exception_Hook(self)
    def on_fx_history(self):
        """GUI-thread handler: refresh views after new fiat history data."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_quotes(self, b):
        # Network-thread callback; marshal to the GUI thread via Qt signal.
        self.new_fx_quotes_signal.emit()
    def on_fx_quotes(self):
        """GUI-thread handler: re-render amounts after a new fiat quote."""
        self.update_status()
        # Refresh edits with the new rate
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_model.refresh('fx_quotes')
            self.address_list.update()
    def toggle_tab(self, tab):
        """Show or hide an optional tab, persisting the choice in config
        and keeping the tab's configured ordering (tab_pos)."""
        show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
        self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
        item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
        tab.menu_action.setText(item_text)
        if show:
            # Find out where to place the tab
            index = len(self.tabs)
            for i in range(len(self.tabs)):
                try:
                    if tab.tab_pos < self.tabs.widget(i).tab_pos:
                        index = i
                        break
                except AttributeError:
                    # Mandatory tabs have no tab_pos attribute; skip them.
                    pass
            self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
        else:
            i = self.tabs.indexOf(tab)
            self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        # Counterpart of push_top_level_window; raises ValueError if the
        # window was never pushed.
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        # Tag log lines with the wallet's file basename for disambiguation.
        return "%s/%s" % (PrintError.diagnostic_name(self),
                          self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        # Un-hide and raise above sibling windows.
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        """Central error handler for background tasks.

        `exc_info` is a (type, value, traceback) triple. User cancellations
        are silent; user-facing exceptions show only their message; anything
        else is printed with a traceback and then shown.
        """
        e = exc_info[1]
        if isinstance(e, UserCancelled):
            pass
        elif isinstance(e, UserFacingException):
            self.show_error(str(e))
        else:
            try:
                traceback.print_exception(*exc_info)
            except OSError:
                pass  # see #4418
            self.show_error(str(e))
    def on_network(self, event, *args):
        """Network-thread callback dispatcher.

        Cheap bookkeeping (setting flags, queueing) happens here; anything
        touching Qt widgets is forwarded to the GUI thread via
        `network_signal` and handled in on_network_qt().
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.print_error("unexpected network message:", event, args)
    def on_network_qt(self, event, args=None):
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            # Fee estimates only matter in dynamic-fee mode.
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.do_update_fee()
            # The histogram feeds mempool-based confirmation estimates
            # regardless of the fee mode.
            self.history_model.on_fee_histogram()
        else:
            self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log the wallet path and let plugins react to the wallet closing."""
        if self.wallet:
            self.print_error('close_wallet', self.wallet.storage.path)
        run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
    """Wire a freshly opened wallet into this window and show the GUI.

    Starts the wallet's task thread, refreshes menus and tabs, restores
    window geometry, shows (or hides to tray) the window, then runs the
    'load_wallet' plugin hook and an address-corruption self-check.
    """
    wallet.thread = TaskThread(self, self.on_error)
    self.update_recently_visited(wallet.storage.path)
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.tabs.show()
    self.init_geometry()
    # optionally start minimized to the system tray
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        # corruption is serious: tell the user and report it
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def init_geometry(self):
    """Restore the saved window geometry, falling back to a default.

    The stored "winpos-qt" value may be missing or malformed, and the
    saved position may be off the current screen; in either case a
    sensible default geometry is used.
    """
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        # was a bare `except:` around an `assert` (stripped under -O);
        # test the condition explicitly and catch only malformed values
        if winpos and screen.contains(QRect(*winpos)):
            self.setGeometry(*winpos)
            return
    except (TypeError, ValueError):
        pass  # winpos not an unpackable 4-sequence
    self.print_error("using default geometry")
    self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the window title and menu availability after the wallet's
    watching-only status (or password capability) may have changed."""
    app_name = "Koinon Electrum Wallet Testnet" if constants.net.TESTNET else "Koinon"
    title = '%s %s - %s' % (app_name, KOINON_ELECTRUM_VERSION,
                            self.wallet.basename())
    tags = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        tags.append(_('watching only'))
    title += ' [%s]' % ', '.join(tags)
    self.setWindowTitle(title)
    # enable/disable actions that only make sense for certain wallet types
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop a warning dialog when the open wallet cannot spend."""
    if not self.wallet.is_watching_only():
        return
    msg = ' '.join([
        _("This wallet is watching-only."),
        _("This means you will not be able to spend Bitcoins with it."),
        _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
    ])
    self.show_warning(msg, title=_('Information'))
def open_wallet(self):
    """Prompt for a wallet file and open it in a (possibly new) window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def backup_wallet(self):
    """Save a copy of the current wallet file to a user-chosen location."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path == path:
        return  # copying a file onto itself is a no-op
    try:
        shutil.copy2(path, new_path)  # copy2 preserves metadata
    except Exception as reason:
        # was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception is sufficient here
        self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
    else:
        self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
def update_recently_visited(self, filename):
    """Promote *filename* to the top of the recently-open list and rebuild
    the 'Recently open' menu (Ctrl+1..Ctrl+5 shortcuts)."""
    recent = self.config.get('recently_open', [])
    try:
        # sanity check: the stored config value may be malformed; sorting
        # raises if the entries are not mutually comparable
        sorted(recent)
    except Exception:
        # narrowed from a bare `except:` — don't swallow SystemExit etc.
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    # keep only wallets that still exist, at most five of them
    recent = [path for path in recent if os.path.exists(path)]
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # bind k now; a bare lambda would late-bind the loop variable
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Directory containing the currently configured wallet file."""
    wallet_path = self.config.get_wallet_path()
    return os.path.dirname(os.path.abspath(wallet_path))
def new_wallet(self):
    """Start the create/restore flow with a fresh default wallet filename."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    full_path = os.path.join(wallet_folder, get_new_wallet_name(wallet_folder))
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the main menu bar (File/Wallet/View/Tools/Help).

    Stores several actions/menus on ``self`` so other methods (e.g.
    ``watching_only_changed``) can enable/disable them later.
    """
    menubar = QMenuBar()
    # --- File menu ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    # --- Wallet menu ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
    # --- View menu: one show/hide toggle per optional tab ---
    def add_toggle_action(view_menu, tab):
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))
    # Settings / Preferences are all reserved keywords in macOS using this as work around
    tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    run_hook('init_menubar_tools', self, tools_menu)
    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the send flow prefilled with the connected server's donation
    address, if the server advertises one."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().host
    self.pay_to_URI('bitcoin:%s?message=donation for %s' % (addr, host))
def show_about(self):
    """Show the About dialog with version and project blurb."""
    QMessageBox.about(self, "Koinon",
                      (_("Version")+" %s" % KOINON_ELECTRUM_VERSION + "\n\n" +
                       _("Koinon's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
    """Show bug-reporting instructions in a rich-text message box."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Koinon (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Koinon Electrum Wallet- " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Drain the queue of incoming transactions and show notifications.

    Called periodically from the GUI thread.  Does nothing while the
    wallet is still syncing, and rate-limits notifications to one batch
    every 20 seconds.  Three or more queued transactions are merged into
    a single summary notification.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return # no notifications while syncing
    now = time.time()
    rate_limit = 20 # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.print_error("Notifying GUI about new transactions")
    txns = []
    # drain the queue without blocking
    while True:
        try:
            txns.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    # Combine the transactions if there are at least three
    if len(txns) >= 3:
        total_amount = 0
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                total_amount += v
        self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
                    .format(len(txns), self.format_amount_and_units(total_amount)))
    else:
        for tx in txns:
            is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
    """Show a desktop notification *message* via the system tray icon."""
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Koinon Electrum Wallet", message, QIcon(":icons/electrum_dark_icon"), 20000)
        except TypeError:
            # older Qt: fall back to the MessageIcon-enum overload
            self.tray.showMessage("Koinon Electrum Wallet", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter=""):
    """Open-file dialog that starts in, and remembers, the last-used directory."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    selected, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
    if selected:
        chosen_dir = os.path.dirname(selected)
        if chosen_dir != directory:
            self.config.set_key('io_dir', chosen_dir, True)
    return selected
def getSaveFileName(self, title, filename, filter=""):
    """Save-file dialog that starts in, and remembers, the last-used directory."""
    directory = self.config.get('io_dir', os.path.expanduser('~'))
    suggested = os.path.join(directory, filename)
    selected, __ = QFileDialog.getSaveFileName(self, title, suggested, filter)
    if selected:
        chosen_dir = os.path.dirname(selected)
        if chosen_dir != directory:
            self.config.set_key('io_dir', chosen_dir, True)
    return selected
def timer_actions(self):
    """Periodic housekeeping driven by a GUI timer.

    Refreshes the wallet view when flagged, resolves payto aliases,
    recomputes the fee when requested, and surfaces queued transaction
    notifications.
    """
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Render a satoshi amount using the user's precision settings."""
    return format_satoshis(
        x, self.num_zeros, self.decimal_point,
        is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
    """Amount with base unit, plus a fiat equivalent when FX is available."""
    text = '{} {}'.format(self.format_amount(amount), self.base_unit())
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text = '{} ({})'.format(text, fiat)
    return text
def format_fee_rate(self, fee_rate):
    """Render *fee_rate* (sat/kB) as a 'sat/byte' string."""
    per_byte = fee_rate / 1000
    return format_fee_satoshis(per_byte, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
    """Return the configured decimal point (display unit precision)."""
    return self.decimal_point
def base_unit(self):
    """Name of the display unit derived from the current decimal point."""
    dp = self.decimal_point
    return decimal_point_to_base_unit_name(dp)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount edit and a fiat amount edit in sync.

    Editing one field recomputes the other from the FX rate.  The
    ``follows`` flags guard against the programmatic setText/setAmount
    re-triggering the textChanged handler and looping forever.
    """
    def edit_changed(edit):
        if edit.follows:
            # change was made programmatically by this very handler
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # no rate or empty input: blank the counterpart field(s)
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                # blue marks an auto-filled (derived) value
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar text, tray tooltip and connection icon.

    Chooses between offline / syncing / lagging / balance display
    depending on network and wallet state.
    """
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = QIcon(":icons/status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            text = _("Synchronizing...")
            icon = QIcon(":icons/status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = QIcon(":icons/status_lagging%s.png"%fork_str)
        else:
            # fully synced: show confirmed / unconfirmed / unmatured balance
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = QIcon(":icons/status_connected%s.png"%fork_str)
            else:
                icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = QIcon(":icons/status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the tabs once synced (or when offline)."""
    self.update_status()
    synced = self.wallet.up_to_date
    offline = not self.network or not self.network.is_connected()
    if synced or offline:
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh every list view; updates for other wallets are ignored."""
    if wallet is None:
        wallet = self.wallet
    if wallet != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.utxo_list,
                 self.contact_list, self.invoice_list):
        view.update()
    self.update_completions()
def create_history_tab(self):
    """Build the History tab: model, list view and toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    self.history_list.searchable_list = self.history_list
    toolbar = self.history_list.create_toolbar(self.config)
    # restore the user's toolbar visibility preference
    self.history_list.show_toolbar(self.config.get('show_toolbar_history', False))
    return self.create_list_tab(self.history_list, toolbar)
def show_address(self, addr):
    """Open the per-address details dialog for *addr*."""
    from . import address_dialog
    dlg = address_dialog.AddressDialog(self, addr)
    dlg.exec_()
def show_transaction(self, tx, tx_desc=None):
    """Open the transaction detail dialog.

    tx_desc is set only for txs created in the Send tab.
    """
    show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
    """Build the Receive tab: address/description/amount form, expiry
    selector, QR widget and the saved-requests list.

    Returns the tab's container widget.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_address_e = ButtonsLineEdit()
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
    self.receive_address_label = HelpLabel(_('Receiving address'), msg)
    self.receive_address_e.textChanged.connect(self.update_receive_qr)
    self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
    grid.addWidget(self.receive_address_label, 0, 0)
    grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 1, 0)
    grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 2, 0)
    grid.addWidget(self.receive_amount_e, 2, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
    # keep BTC and fiat amounts in sync (no fee field on this tab)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    self.expires_combo = QComboBox()
    self.expires_combo.addItems([i[0] for i in expiration_values])
    self.expires_combo.setCurrentIndex(3)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
    grid.addWidget(self.expires_combo, 3, 1)
    # read-only label shown (instead of the combo) for saved requests
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 3, 1)
    self.save_request_button = QPushButton(_('Save'))
    self.save_request_button.clicked.connect(self.save_payment_request)
    self.new_request_button = QPushButton(_('New'))
    self.new_request_button.clicked.connect(self.new_payment_request)
    # clicking the QR toggles the detached QR window
    self.receive_qr = QRCodeWidget(fixedSize=200)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.save_request_button)
    buttons.addWidget(self.new_request_button)
    grid.addLayout(buttons, 4, 1, 1, 2)
    self.receive_requests_label = QLabel(_('Requests'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addWidget(self.receive_qr)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_payment_request(self, addr):
    """Remove the payment request at *addr* and refresh the receive tab."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build the payment URI for the stored request at *addr*, including
    optional time/exp/name&sig query parameters."""
    req = self.wallet.receive_requests[addr]
    message = self.wallet.labels.get(addr, '')
    URI = util.create_URI(addr, req['amount'], message)
    params = []
    if req.get('time'):
        params.append("time=%d" % req.get('time'))
    if req.get('exp'):
        params.append("exp=%d" % req.get('exp'))
    if req.get('name') and req.get('sig'):
        # signature is stored as hex; encode as base58 for the URI
        sig = bitcoin.base_encode(bfh(req.get('sig')), base=58)
        params.append("name=" + req['name'] + "&sig=" + sig)
    if params:
        URI += "&" + "&".join(params)
    return str(URI)
def sign_payment_request(self, addr):
    """Sign the payment request at *addr* with the configured OpenAlias key.

    Does nothing unless an alias is configured, it has been resolved
    (``self.alias_info``), and the alias address belongs to this wallet.
    Prompts for the wallet password when the keystore is encrypted.
    """
    alias = self.config.get('alias')
    if not alias or not self.alias_info:
        return
    alias_addr, alias_name, validated = self.alias_info
    # removed unused local `alias_privkey`; flattened the nested
    # conditionals into guard clauses
    if not alias_addr or not self.wallet.is_mine(alias_addr):
        return
    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
    password = None
    if self.wallet.has_keystore_encryption():
        password = self.password_dialog(msg)
        if not password:
            return
    try:
        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
    except Exception as e:
        self.show_error(str(e))
def save_payment_request(self):
    """Create and store a payment request from the receive form.

    Requires at least a message or an amount.  On success the request is
    signed (if an alias is configured) and the Save button disabled;
    either way the request and address lists are refreshed.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    # map the combo index to its expiration value (seconds or None)
    i = self.expires_combo.currentIndex()
    expiration = list(map(lambda x: x[1], expiration_values))[i]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        # refresh the views whether or not the add succeeded
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR text view and copy button."""
    dialog = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    description = QLabel(msg)
    description.setWordWrap(True)
    layout.addWidget(description)
    text_view = ShowQRTextEdit(text=data)
    layout.addWidget(text_view)
    layout.addLayout(Buttons(CopyCloseButton(text_view.text, self.app, dialog)))
    dialog.setLayout(layout)
    dialog.exec_()
def export_payment_request(self, addr):
    """Save the BIP70-serialized payment request for *addr* to a file."""
    req = self.wallet.receive_requests.get(addr)
    payload = paymentrequest.serialize_request(req).SerializeToString()
    default_name = req['id'] + '.bip70'
    fileName = self.getSaveFileName(_("Select where to save your payment request"), default_name, "*.bip70")
    if not fileName:
        return
    with open(fileName, "wb+") as f:
        f.write(util.to_bytes(payload))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Point the receive form at a fresh unused address.

    When no unused address remains: non-deterministic wallets get an
    explanatory message; deterministic wallets are warned before a new
    address is created beyond the gap limit.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    # reset expiry UI and focus the description field
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Point the receive form at *addr*, clearing message and amount."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive form to a fresh receiving address and empty fields."""
    try:
        addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        addr = ''
    # reuse the form-reset helper instead of repeating the three setters
    self.set_receive_address(addr)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show or hide the detached QR window, remembering its geometry."""
    from . import qrwindow
    if not self.qr_window:
        # first use: create and show it
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    elif self.qr_window.isVisible():
        # hide: stash geometry so we can restore it later
        self.qr_window_geometry = self.qr_window.geometry()
        self.qr_window.setVisible(False)
    else:
        # re-show at the remembered position
        self.qr_window.setVisible(True)
        self.qr_window.setGeometry(self.qr_window_geometry)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    idx = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(idx)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    idx = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(idx)
def receive_at(self, addr):
    """Open the receive tab prefilled with *addr*; invalid addresses are ignored."""
    if not bitcoin.is_address(addr):
        return
    self.show_receive_tab()
    self.receive_address_e.setText(addr)
    self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Refresh the receive-tab QR code (and detached QR window) from the form."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # a request needs at least an amount or a message to be saveable
    can_save = (amount is not None) or (message != "")
    self.save_request_button.setEnabled(can_save)
    uri = util.create_URI(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
    """Remember the tooltip text explaining the current fee rounding."""
    text = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = text.format(num_satoshis_added)
def create_send_tab(self):
    """Build the Send tab: payto/description/amount form, the fee
    controls (slider plus advanced fee/feerate edits), action buttons
    and the invoice list.

    Returns the tab's container widget.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # 'From' row: used by coin control to pin the spent coins
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    # when the amount edit is frozen (e.g. paying an invoice), freeze fiat too
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)

    def fee_cb(dyn, pos, fee_rate):
        # fee-slider callback: persist the chosen level/rate and
        # mirror it into the advanced feerate edit
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.is_max else self.update_fee()

    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)

    def on_fee_or_feerate(edit_changed, editing_finished):
        # the user can pin either the absolute fee or the feerate;
        # the 'modified' flag decides which one drives the calculation
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()

    class TxSizeLabel(QLabel):
        # shows "x N bytes =" between the feerate and fee edits
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')

    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))

    def feerounding_onclick():
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Koinon tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        QMessageBox.information(self, 'Fee rounding', text)

    self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    # advanced fee controls are hidden unless 'show_fee' is enabled
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)

    def reset_max(text):
        # typing any amount cancels send-max mode
        self.is_max = False
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)

    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)

    def entry_changed():
        # recolor the amount/fee/feerate edits to reflect their state
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _( "Not enough funds" )
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())

    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    # layout
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Mark the send amount as 'max' (spend all available coins) and
    recompute the fee immediately."""
    if run_hook('abort_send', self):
        return
    self.is_max = True
    self.do_update_fee()

def update_fee(self):
    """Request a deferred fee recalculation.
    NOTE(review): assumes some periodic handler polls require_fee_update
    and then calls do_update_fee() — confirm against the rest of the class."""
    self.require_fee_update = True

def get_payto_or_dummy(self):
    """Return the recipient from the payto field, or a dummy wallet address
    so a fee-estimation transaction can still be built with no recipient."""
    r = self.payto_e.get_recipient()
    if r:
        return r
    return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.is_max else self.amount_e.get_amount()
    if amount is None:
        # No amount entered yet: clear the derived fee unless user-set.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
    else:
        fee_estimator = self.get_send_fee_estimator()
        outputs = self.payto_e.get_outputs(self.is_max)
        if not outputs:
            # No valid recipient yet: estimate against a dummy address.
            _type, addr = self.get_payto_or_dummy()
            outputs = [TxOutput(_type, addr, amount)]
        is_sweep = bool(self.tx_external_keypairs)
        make_tx = lambda fee_est: \
            self.wallet.make_unsigned_transaction(
                self.get_coins(), outputs, self.config,
                fixed_fee=fee_est, is_sweep=is_sweep)
        try:
            tx = make_tx(fee_estimator)
            self.not_enough_funds = False
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            # Clear derived fields that were not explicitly set by the user.
            if not freeze_fee:
                self.fee_e.setAmount(None)
            if not freeze_feerate:
                self.feerate_e.setAmount(None)
            self.feerounding_icon.setVisible(False)
            if isinstance(e, NotEnoughFunds):
                self.not_enough_funds = True
            elif isinstance(e, NoDynamicFeeEstimates):
                # Still display the tx size, from a zero-fee build.
                try:
                    tx = make_tx(0)
                    size = tx.estimated_size()
                    self.size_e.setAmount(size)
                except BaseException:
                    pass
            return
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            return
        size = tx.estimated_size()
        self.size_e.setAmount(size)
        fee = tx.get_fee()
        fee = None if self.not_enough_funds else fee
        # Displayed fee/fee_rate values are set according to user input.
        # Due to rounding or dropping dust in CoinChooser,
        # actual fees often differ somewhat.
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate is not None:
                displayed_feerate = quantize_feerate(displayed_feerate)
            else:
                # fallback to actual fee
                displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(int(feerounding))
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(abs(feerounding) >= 1)
        if self.is_max:
            # 'Max' spend: reflect the actual spendable amount back into
            # the amount field, net of any plugin ("extra") fee.
            amount = tx.output_value()
            __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
            amount_after_all_fees = amount - x_fee_amount
            self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Remove *item* from the 'pay from' coin list and refresh the fee."""
    i = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(i)
    self.redraw_from_list()
    self.update_fee()

def from_list_menu(self, position):
    """Context menu for the 'pay from' list; offers removal of the clicked row."""
    item = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
    menu.exec_(self.from_list.viewport().mapToGlobal(position))

def set_pay_from(self, coins):
    """Replace the manual input-coin selection with *coins* and redraw."""
    self.pay_from = list(coins)
    self.redraw_from_list()

def redraw_from_list(self):
    """Repopulate the 'pay from' tree widget; hidden when empty."""
    self.from_list.clear()
    self.from_label.setHidden(len(self.pay_from) == 0)
    self.from_list.setHidden(len(self.pay_from) == 0)
    def format(x):
        # "txid_prefix...txid_suffix:n <tab> address"; assumes each coin is a
        # dict with 'prevout_hash', 'prevout_n', 'address', 'value' keys.
        h = x.get('prevout_hash')
        return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
    for item in self.pay_from:
        self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
    """Return the pay-to string for contact *key*: 'Label <key>' when the
    contact is a plain address, otherwise the key itself."""
    contact_type, label = self.contacts.get(key)
    if contact_type == 'address':
        return label + ' <' + key + '>'
    return key

def update_completions(self):
    """Rebuild the payto auto-completion list from all known contacts."""
    candidates = [self.get_contact_payto(contact_key)
                  for contact_key in self.contacts.keys()]
    self.completions.setStringList(candidates)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the password verifies or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """Truthy when the absolute-fee field holds a user-entered value that
    should be kept rather than recomputed."""
    fee_widget = self.fee_e
    return fee_widget.isVisible() and fee_widget.isModified() \
        and (fee_widget.text() or fee_widget.hasFocus())

def is_send_feerate_frozen(self):
    """Truthy when the fee-rate field holds a user-entered value that
    should be kept rather than recomputed."""
    rate_widget = self.feerate_e
    return rate_widget.isVisible() and rate_widget.isModified() \
        and (rate_widget.text() or rate_widget.hasFocus())
def get_send_fee_estimator(self):
    """Return the fee estimator implied by the UI state: a fixed satoshi
    amount when the fee field is frozen, a feerate-based callable when the
    feerate field is frozen, or None to fall back to the config default."""
    if self.is_send_fee_frozen():
        fee_estimator = self.fee_e.get_amount()
    elif self.is_send_feerate_frozen():
        amount = self.feerate_e.get_amount()  # sat/byte feerate
        amount = 0 if amount is None else amount * 1000  # sat/kilobyte feerate
        fee_estimator = partial(
            simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
    else:
        fee_estimator = None
    return fee_estimator
def read_send_tab(self):
    """Validate the send tab and return (outputs, fee_estimator, label, coins),
    or None if validation fails or the user aborts."""
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return
    label = self.message_e.text()
    if self.payment_request:
        # BIP70 payment request dictates the outputs.
        outputs = self.payment_request.get_outputs()
    else:
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return
        outputs = self.payto_e.get_outputs(self.is_max)
        if self.payto_e.is_alias and self.payto_e.validated is False:
            # Alias resolved but DNSSEC validation failed: warn, allow override.
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return
    if not outputs:
        self.show_error(_('No outputs'))
        return
    for o in outputs:
        if o.address is None:
            self.show_error(_('Bitcoin Address is None'))
            return
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Bitcoin Address'))
            return
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return
    fee_estimator = self.get_send_fee_estimator()
    coins = self.get_coins()
    return outputs, fee_estimator, label, coins
def do_preview(self):
    """Build the transaction and show it in the details window without
    signing or broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Build the transaction from the send tab, then either preview it or
    confirm, sign and broadcast it."""
    if run_hook('abort_send', self):
        return
    r = self.read_send_tab()
    if not r:
        return
    outputs, fee_estimator, tx_desc, coins = r
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        # Serious wallet-state problem: show it, then re-raise for the crash reporter.
        self.show_error(str(e))
        raise
    except BaseException as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # Reject fees below the server relay floor; such txs would not propagate.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > confirm_rate * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        if success:
            if not tx.is_complete():
                # Partially signed (e.g. multisig): show it for further signing.
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    """Prompt for the wallet password (via @protected), then sign *tx*."""
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # A plugin may wrap the success path (e.g. co-signing services).
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast *tx* on a background thread, send the BIP70 payment message
    (if an invoice is active), then report the outcome on the GUI thread."""
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except Exception as e:
            status, msg = False, repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            # Send the BIP70 Payment message and wait (bounded) for the ACK.
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                # msg is the txid on success.
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                display_msg = _('The server returned an error when broadcasting the transaction.')
                if msg:
                    display_msg += '\n' + msg
                parent.show_error(display_msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Show a modal list of *choices*; return the selected index, or None
    if the dialog is dismissed."""
    # Needed by QtHandler for hardware wallets
    dialog = WindowModalDialog(self.top_level_window())
    clayout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(clayout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if not dialog.exec_():
        return None
    return clayout.selected_index()
def lock_amount(self, b):
    """Freeze or unfreeze the amount field; the Max button is only usable
    while the amount remains editable."""
    self.max_button.setEnabled(not b)
    self.amount_e.setFrozen(b)

def prepare_for_payment_request(self):
    """Put the send tab into payment-request mode: freeze the recipient and
    message fields and show a placeholder until the request is fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for widget in (self.payto_e, self.message_e):
        widget.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True

def delete_invoice(self, key):
    """Delete invoice *key* from the store and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Fill the send tab from a successfully verified payment request."""
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Color the payto field by request validity.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")

def payment_request_error(self):
    """Report a failed payment-request verification and reset the send tab."""
    self.show_message(self.payment_request.error)
    self.payment_request = None
    self.do_clear()

def on_pr(self, request):
    """Callback for a fetched payment request. Presumably invoked off the
    GUI thread — hence the outcome is forwarded through Qt signals."""
    self.payment_request = request
    if self.payment_request.verify(self.contacts):
        self.payment_request_ok_signal.emit()
    else:
        self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
    """Populate the send tab from a 'bitcoin:' URI. A URI carrying a payment
    request ('r', or a signed 'name') switches to request mode instead."""
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except BaseException as e:
        self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
        return
    self.show_send_tab()
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # Payment request: parse_URI fetches it and calls self.on_pr.
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its default, empty state."""
    self.is_max = False
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
              self.fee_e, self.feerate_e]:
        e.setText('')
        e.setFrozen(False)
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)

def set_frozen_state(self, addrs, freeze):
    """Freeze or unfreeze *addrs* in the wallet and refresh dependent views."""
    self.wallet.set_frozen_state(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap list widget *l* (plus an optional toolbar) in a tab page; the
    widget is tagged with searchable_list for the status-bar search box."""
    w = QWidget()
    w.searchable_list = l
    vbox = QVBoxLayout()
    w.setLayout(vbox)
    vbox.setContentsMargins(0, 0, 0, 0)
    vbox.setSpacing(0)
    if toolbar:
        vbox.addLayout(toolbar)
    vbox.addWidget(l)
    return w

def create_addresses_tab(self):
    """Create the Addresses tab, with its optional toolbar."""
    from .address_list import AddressList
    self.address_list = l = AddressList(self)
    toolbar = l.create_toolbar(self.config)
    toolbar_shown = self.config.get('show_toolbar_addresses', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)

def create_utxo_tab(self):
    """Create the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = l = UTXOList(self)
    return self.create_list_tab(l)

def create_contacts_tab(self):
    """Create the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = l = ContactList(self)
    return self.create_list_tab(l)
def remove_address(self, addr):
    """Delete *addr* from the wallet after user confirmation."""
    if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        self.wallet.delete_address(addr)
        self.need_update.set()  # history, addresses, coins
        self.clear_receive_tab()
def get_coins(self):
    """Return the coins to spend: the manual 'pay from' selection when one
    is set, otherwise all spendable coins in the wallet."""
    if self.pay_from:
        return self.pay_from
    return self.wallet.get_spendable_coins(None, self.config)

def spend_coins(self, coins):
    """Preselect *coins* as transaction inputs and switch to the send tab."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the payto field to multi-output mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    msg = '\n'.join([
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.')
    ])
    self.show_message(msg, title=_('Pay to many'))

def payto_contacts(self, labels):
    """Prefill the send tab with the given contact *labels* (one or many)."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
    else:
        # Multiple recipients: one "payto, 0" line each; user fills in amounts.
        text = "\n".join([payto + ", 0" for payto in paytos])
        self.payto_e.setText(text)
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or update the contact *address* -> *label*. Returns True on
    success, False when the address is invalid."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True

def delete_contacts(self, labels):
    """Remove the given contacts after one combined confirmation prompt."""
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(" + ".join(labels))):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up invoice *key*, refresh its signature status, show details."""
    pr = self.invoices.get(key)
    if pr is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    pr.verify(self.contacts)
    self.show_pr_details(pr)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*
    (requestor, outputs, memo, signature status, expiry), plus export
    (BIP70 file) and delete actions."""
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # Save the raw BIP70 payment request to a user-chosen file.
        name = str(key) + '.bip70'
        fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)
        # Fix: translate only the literal message, then append the filename.
        # Previously the concatenated string (including fn) was passed to
        # _(), producing an untranslatable, unextractable msgid.
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
    """Load stored invoice *key* into the send tab and re-verify it."""
    pr = self.invoices.get(key)
    self.payment_request = pr
    self.prepare_for_payment_request()
    pr.error = None  # this forces verify() to re-run
    if pr.verify(self.contacts):
        self.payment_request_ok()
    else:
        self.payment_request_error()
def create_console_tab(self):
    """Create the interactive Python console tab."""
    from .console import Console
    self.console = console = Console()
    return console

def update_console(self):
    """(Re)populate the console namespace with wallet/network objects and
    the wrapped command-line commands."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum': electrum,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': bitcoin,
    })
    c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        # Bind *method* now — avoids the late-binding closure pitfall in the loop.
        return lambda *args: f(method, args, self.password_dialog)
    for m in dir(c):
        if m[0]=='_' or m in ['network','wallet','config']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the status bar: balance label, hidden search box, and the
    password / preferences / seed / network buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    qtVersion = qVersion()
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    self.lock_icon = QIcon()
    self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
    sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)

def update_lock_icon(self):
    """Reflect whether the wallet has a password on the lock-button icon."""
    icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
    self.password_button.setIcon(icon)

def update_buttons_on_seed(self):
    """Show/hide seed, password and send buttons per wallet capabilities."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
    self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
    """Show or hide the status-bar search box; clears the filter on hide."""
    tab = self.tabs.currentWidget()
    #if hasattr(tab, 'searchable_list'):
    #    tab.searchable_list.toggle_toolbar()
    #return
    self.search_box.setHidden(not self.search_box.isHidden())
    if not self.search_box.isHidden():
        self.search_box.setFocus(1)
    else:
        self.do_search('')

def do_search(self, t):
    """Apply filter text *t* to the current tab's searchable list, if any."""
    tab = self.tabs.currentWidget()
    if hasattr(tab, 'searchable_list'):
        tab.searchable_list.filter(t)
def new_contact_dialog(self):
    """Prompt for an address and a name, then store them as a contact."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()
    line1.setFixedWidth(280)
    line2 = QLineEdit()
    line2.setFixedWidth(280)
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact(label, address) validates the address itself.
        self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
    """Show the wallet-information dialog: name, type, script type, seed
    availability, keystore type(s) and master public key(s)."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    mpk_list = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    wallet_type = self.wallet.storage.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    vbox.addLayout(grid)
    if self.wallet.is_deterministic():
        mpk_text = ShowQRTextEdit()
        mpk_text.setMaximumHeight(150)
        mpk_text.addCopyButton(self.app)
        def show_mpk(index):
            mpk_text.setText(mpk_list[index])
        # only show the combobox in case multiple accounts are available
        if len(mpk_list) > 1:
            def label(key):
                if isinstance(self.wallet, Multisig_Wallet):
                    return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                return ''
            labels = [label(i) for i in range(len(mpk_list))]
            on_click = lambda clayout: show_mpk(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        else:
            vbox.addWidget(QLabel(_("Master Public Key")))
        show_mpk(0)
        vbox.addWidget(mpk_text)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file from disk."""
    if self.question('\n'.join([
            _('Delete wallet file?'),
            "%s"%self.wallet.storage.path,
            _('If your wallet contains funds, make sure you have saved its seed.')])):
        self._delete_wallet()

@protected
def _delete_wallet(self, password):
    """Stop the wallet, close this window and unlink the wallet file.
    The password argument is unused here; @protected requests it,
    presumably as an extra confirmation step — confirm intent."""
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    self.gui_object.daemon.stop_wallet(wallet_path)
    self.close()
    os.unlink(wallet_path)
    self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Display the wallet seed (and passphrase) after password entry."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(str(e))
        return
    from .seed_dialog import SeedDialog
    d = SeedDialog(self, seed, passphrase)
    d.exec_()

def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Show *data* as a QR code in a modal dialog.
    NOTE(review): the default title is evaluated once at import time, so
    it is translated with the language active at module load — confirm
    whether that matters for runtime language switching."""
    if not data:
        return
    d = QRDialog(data, parent or self, title)
    d.exec_()
@protected
def show_private_key(self, address, password):
    """Export and display the private key (and any redeem script) for
    *address* in a modal dialog with copy/QR support."""
    if not address:
        return
    try:
        pk, redeem_script = self.wallet.export_private_key(address, password)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    if redeem_script:
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=redeem_script)
        rds_e.addCopyButton(self.app)
        vbox.addWidget(rds_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Explanatory text appended to the error shown when the user tries to sign
# with an address type whose public key is not unique (see do_sign, which
# references it as self.msg_sign).
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign *message* with the key for *address*. The arguments are the Qt
    input widgets; the signature widget is filled asynchronously on success."""
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    # Only single-key address types have a unique pubkey to sign with.
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify the base64 *signature* over *message* for *address*; the
    arguments are the Qt input widgets, read here."""
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    try:
        # This can throw on invalid base64
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception as e:
        # Any decode/verify failure counts as a bad signature.
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the sign/verify message dialog, optionally prefilled with *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext in encrypted_e with the key for pubkey_e.

    The `password` argument is injected by the @protected decorator.
    Decryption runs on the wallet's worker thread; the result is written
    back into message_e on success.
    """
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    def setText(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the text in message_e to the public key in pubkey_e.

    Writes the base64-armored ciphertext into encrypted_e.  A malformed
    public key shows a warning instead of raising.
    """
    message = message_e.toPlainText()
    message = message.encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except Exception:
        # was `except BaseException as e`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception (the bound
        # name was unused anyway)
        traceback.print_exc(file=sys.stdout)
        self.show_warning(_('Invalid Public key'))
        return
    encrypted = public_key.encrypt_message(message)
    encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
    """Open the encrypt/decrypt message dialog.

    address -- if given, the public key field is pre-filled with that
    address's public key from the wallet.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    # handlers read the widgets' contents at click time
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt the user for a password; returns it, or None if cancelled."""
    from .password_dialog import PasswordDialog
    owner = parent or self
    dlg = PasswordDialog(owner, msg)
    return dlg.run()
def tx_from_text(self, txt):
    """Parse user-supplied text into a Transaction.

    Returns None (after showing an error dialog) if the text cannot be
    parsed.
    """
    from electrum.transaction import tx_from_str
    try:
        return Transaction(tx_from_str(txt))
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code with the configured camera and act on its payload.

    A "bitcoin:" URI goes to the payment flow; anything else is assumed
    to be a base43-encoded raw transaction and is shown in the tx dialog.
    """
    from electrum import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if str(data).startswith("bitcoin:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    # (transactions in QR codes are base43-encoded here)
    try:
        data = bh2u(bitcoin.base_decode(data, length=None, base=43))
    except BaseException as e:
        self.show_error((_('Could not decode QR code')+':\n{}').format(e))
        return
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Let the user pick a .txn file and parse its contents as a transaction.

    Returns the parsed Transaction, or None on cancel/error.
    """
    chosen_path = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not chosen_path:
        return
    try:
        with open(chosen_path, "r") as f:
            raw_text = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(raw_text)
def do_process_from_text(self):
    """Prompt for raw transaction text and display it in the tx dialog."""
    raw = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not raw:
        return
    parsed = self.tx_from_text(raw)
    if parsed:
        self.show_transaction(parsed)
def do_process_from_file(self):
    """Load a transaction from a user-chosen file and display it."""
    loaded = self.read_tx_from_file()
    if loaded:
        self.show_transaction(loaded)
def do_process_from_txid(self):
    """Ask the user for a txid, fetch the raw tx from the network, show it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        try:
            # blocking bridge into the network thread, bounded by a timeout
            raw_tx = self.network.run_from_another_thread(
                self.network.get_transaction(txid, timeout=10))
        except Exception as e:
            self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
            return
        tx = transaction.Transaction(raw_tx)
        self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Modal dialog that derives and exports all wallet private keys.

    The `password` argument is injected by the @protected decorator.
    Key derivation runs in a background thread and reports progress via
    the computing_privkeys/show_privkeys signals; the `done`/`cancelled`
    flags coordinate shutdown between the worker and the dialog.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        # warn, but still allow the export of this wallet's key shares
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    # Export stays disabled until all keys have been derived
    b.setEnabled(False)
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background worker: derive one key at a time, emitting progress;
        # bails out as soon as the dialog is done or cancelled
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        # if the dialog is dismissed before the worker finishes, flag
        # cancellation and detach both signals
        nonlocal done
        nonlocal cancelled
        if not done:
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write a mapping of address -> private key to fileName.

    fileName -- destination path (overwritten if it exists)
    pklist   -- dict mapping address to WIF private key
    is_csv   -- True: CSV with an "address","private_key" header row;
                False: pretty-printed JSON object
    """
    # newline='' is required by the csv module so it controls line endings
    # itself (otherwise each row gets a doubled "\r\r\n" ending on Windows);
    # "w" instead of "w+" -- the file is only written, never read back.
    with open(fileName, "w", newline='') as f:
        if is_csv:
            transaction = csv.writer(f)
            transaction.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                # right-pad the address to 34 chars, matching the
                # historical export format
                transaction.writerow(["%34s"%addr,pk])
        else:
            f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
    """Import wallet labels from a user-selected file via the meta-import helpers."""
    def _apply(data):
        for key, value in data.items():
            self.wallet.set_label(key, value)
    def _validate(data):
        # TODO: actually validate the imported structure
        return data
    def _import_from(path):
        import_meta(path, _validate, _apply)
    def _on_done():
        self.need_update.set()
    import_meta_gui(self, _('labels'), _import_from, _on_done)
def do_export_labels(self):
    """Export the wallet's labels to a user-selected file."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Dialog to sweep coins from pasted private keys into a wallet address.

    On accept, prepares the sweep inputs and pre-fills the Send tab with a
    max-amount payment to the chosen destination address.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # destination: prefer an unused address; fall back for wallet types
    # that do not implement get_receiving_addresses()
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # returns the address string, or None if the field is invalid
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk():
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text)
    # Sweep is enabled only while both fields validate; the address field
    # is tinted red when it does not parse
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address(addr)
    except InternalAddressCorruption as e:
        # show the error, then re-raise so the corruption is not silently ignored
        self.show_error(str(e))
        raise
    try:
        coins, keypairs = sweep_preparations(get_pk(), self.network)
    except Exception as e:  # FIXME too broad...
        #traceback.print_exc(file=sys.stderr)
        self.show_message(str(e))
        return
    self.do_clear()
    self.tx_external_keypairs = keypairs
    self.spend_coins(coins)
    self.payto_e.setText(addr)
    self.spend_max()
    # lock the pre-filled fields so the user cannot break the sweep tx
    self.payto_e.setFrozen(True)
    self.amount_e.setFrozen(True)
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Shared driver for address/privkey import dialogs.

    Prompts with a multi-line dialog, splits the input on whitespace,
    passes the entries to `func` (which returns (good_inputs, bad_inputs)),
    reports both lists (truncated to 10 items), then refreshes the views.
    """
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    good_inputs, bad_inputs = func(str(text).split())
    if good_inputs:
        report = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            report += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + report)
    if bad_inputs:
        report = "\n".join(f"{key[:10]}... ({reason})" for key, reason in bad_inputs[:10])
        if len(bad_inputs) > 10:
            report += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + report)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Import watch-only addresses, if this wallet type supports it."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'), _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Prompt for WIF private keys and import them into the wallet.

    The `password` argument is injected by the @protected decorator.
    """
    if not self.wallet.can_import_privkey():
        return
    title = _('Import private keys')
    header_layout = QHBoxLayout()
    header_layout.addWidget(QLabel(_("Enter private keys")+':'))
    header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Show or hide the fiat amount widgets according to FX state, then
    refresh the views that display fiat values."""
    fx_on = bool(self.fx and self.fx.is_enabled())
    self.fiat_send_e.setVisible(fx_on)
    self.fiat_receive_e.setVisible(fx_on)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Build and run the modal Preferences dialog.

    Widgets are collected per tab as (label, widget) pairs; a None second
    element makes the first widget span both grid columns.  Most handlers
    write straight to self.config and refresh the affected views; a few
    settings (language, color theme) only take effect after restart, which
    is tracked via self.need_restart.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    fee_widgets = []
    tx_widgets = []
    id_widgets = []

    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(list(languages.values()))
    lang_keys = list(languages.keys())
    lang_cur_setting = self.config.get("language", '')
    try:
        index = lang_keys.index(lang_cur_setting)
    except ValueError:  # not in list
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = list(languages.keys())[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))

    # number of decimal zeros shown after the point
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))

    # fee estimation mode: static / ETA (time-based) / mempool-based
    msg = '\n'.join([
        _('Time based: fee rate is based on average confirmation time estimates'),
        _('Mempool based: fee rate is targeting a depth in the memory pool')
        ]
    )
    fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
    fee_type_combo = QComboBox()
    fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
    fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
    def on_fee_type(x):
        # combo index maps onto the two config flags
        self.config.set_key('mempool_fees', x==2)
        self.config.set_key('dynamic_fees', x>0)
        self.fee_slider.update()
    fee_type_combo.currentIndexChanged.connect(on_fee_type)
    fee_widgets.append((fee_type_label, fee_type_combo))

    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_adv_controls.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_widgets.append((feebox_cb, None))

    # Replace-By-Fee; the batch-RBF checkbox is only meaningful with RBF on
    use_rbf = self.config.get('use_rbf', True)
    use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
    use_rbf_cb.setChecked(use_rbf)
    use_rbf_cb.setToolTip(
        _('If you check this box, your transactions will be marked as non-final,') + '\n' + \
        _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
        _('Note that some merchants do not accept non-final transactions until they are confirmed.'))
    def on_use_rbf(x):
        self.config.set_key('use_rbf', bool(x))
        batch_rbf_cb.setEnabled(bool(x))
    use_rbf_cb.stateChanged.connect(on_use_rbf)
    fee_widgets.append((use_rbf_cb, None))

    batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
    batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
    batch_rbf_cb.setEnabled(use_rbf)
    batch_rbf_cb.setToolTip(
        _('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
        _('This will save fees.'))
    def on_batch_rbf(x):
        self.config.set_key('batch_rbf', bool(x))
    batch_rbf_cb.stateChanged.connect(on_batch_rbf)
    fee_widgets.append((batch_rbf_cb, None))

    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see https://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # green = alias resolved and validated, red = failed/unvalidated
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
    set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))

    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))

    # base display unit
    units = base_units_list
    msg = (_('Base unit of your wallet.')
           + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
           + _('This setting affects the Send tab, and all balance related fields.'))
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # snapshot the amounts so they can be re-set in the new unit
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        self.decimal_point = base_unit_name_to_decimal_point(unit_result)
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))

    # block explorer
    block_explorers = sorted(util.block_explorer_info().keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))

    # QR-scanner video device (requires libzbar)
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    qr_combo.setEnabled(qrscanner.libzbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))

    # color theme (takes effect after restart)
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Light'), 'default')
    colortheme_combo.addItem(_('Dark'), 'dark')
    index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
    colortheme_combo.setCurrentIndex(index)
    colortheme_label = QLabel(_('Color theme') + ':')
    def on_colortheme(x):
        self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
        self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))

    # change-address policy
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            # multiple-change only makes sense when change is in use
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    tx_widgets.append((usechange_cb, None))

    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((multiple_cb, None))

    # coin (UTXO) selection method -- only shown if more than one is registered
    def fmt_docs(key, klass):
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    if len(choosers) > 1:
        chooser_name = coinchooser.get_name(self.config)
        msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
        msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
        chooser_label = HelpLabel(_('Coin selection') + ':', msg)
        chooser_combo = QComboBox()
        chooser_combo.addItems(choosers)
        i = choosers.index(chooser_name) if chooser_name in choosers else 0
        chooser_combo.setCurrentIndex(i)
        def on_chooser(x):
            chooser_name = choosers[chooser_combo.currentIndex()]
            self.config.set_key('coin_chooser', chooser_name)
        chooser_combo.currentIndexChanged.connect(on_chooser)
        tx_widgets.append((chooser_label, chooser_combo))

    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    tx_widgets.append((unconf_cb, None))

    def on_outrounding(x):
        self.config.set_key('coin_chooser_output_rounding', bool(x))
    enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
    outrounding_cb = QCheckBox(_('Enable output value rounding'))
    outrounding_cb.setToolTip(
        _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
        _('This might improve your privacy somewhat.') + '\n' +
        _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
    outrounding_cb.setChecked(enable_outrounding)
    outrounding_cb.stateChanged.connect(on_outrounding)
    tx_widgets.append((outrounding_cb, None))

    # Fiat Currency
    hist_checkbox = QCheckBox()
    hist_capgains_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([_('None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_history_capgains_cb():
        if not self.fx: return
        hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
        hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            h = self.fx.get_history_config()
            c = self.fx.get_currency()
            exchanges = self.fx.get_exchanges_by_ccy(c, h)
        else:
            exchanges = self.fx.get_exchanges_by_ccy('USD', False)
        # refill the combo without triggering on_exchange
        ex_combo.blockSignals(True)
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
        ex_combo.blockSignals(False)
    def on_currency(hh):
        if not self.fx: return
        # index 0 is _('None') -> fiat disabled
        b = bool(ccy_combo.currentIndex())
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_model.refresh('on_history')
        if self.fx.is_enabled() and checked:
            self.fx.trigger_update()
        update_history_capgains_cb()
    def on_history_capgains(checked):
        if not self.fx: return
        self.fx.set_history_capital_gains_config(checked)
        self.history_model.refresh('on_history_capgains')
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    # initialize the fiat widgets from current state, then wire signals
    update_currencies()
    update_history_cb()
    update_history_capgains_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
    fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
    fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
    fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
    fiat_widgets.append((QLabel(_('Source')), ex_combo))

    # assemble the tab pages from the collected (label, widget) pairs
    tabs_info = [
        (fee_widgets, _('Fees')),
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('Appearance')),
        (fiat_widgets, _('Fiat')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)

    # run the dialog
    d.exec_()

    if self.fx:
        self.fx.trigger_update()
    self.alias_received_signal.disconnect(set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close hook; guards against the rare double delivery of closeEvent."""
    first_close = not self.cleaned_up
    if first_close:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop the wallet thread, unhook network
    callbacks, persist window geometry/console state, and close child
    windows before handing control back to the gui object."""
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # remember the un-maximized geometry for the next start
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # persist only the last 50 console entries
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Modal dialog listing available plugins with enable checkboxes and,
    where a plugin provides one, its settings widget."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # lazily create the plugin's settings widget the first time the
        # plugin is loaded; afterwards just toggle its enabled state
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # hardware-keystore plugins are not user-toggleable here
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin description must not break the whole dialog
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog and open the resulting child tx.

    parent_tx -- the unconfirmed transaction to accelerate
    new_tx    -- template child transaction spending an output of parent_tx
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    def f(x):
        # get_amount() returns None while the field is empty/invalid;
        # previously this raised TypeError on `max_fee - None`
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            output_amount.setText('')
            return
        out_amt = max_fee - fee_for_child
        output_amount.setText((self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else '')
    fee_e.textChanged.connect(f)
    fee = self.config.fee_per_kb() * total_size / 1000
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * total_size / 1000
        fee = min(max_fee, fee)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        # fee field left blank; previously crashed on `None > max_fee`
        return
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """RBF dialog: build a replacement for `tx` that pays a higher fee.

    Refuses to proceed when the original fee is unknown; on accept,
    delegates to wallet.bump_fee and shows the replacement transaction.
    """
    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('New fee' + ':')))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # propose 1.5x the original fee as a starting point
    fee_e.setAmount(fee * 1.5)
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        fee = fee_rate * tx_size / 1000
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    if new_fee is None:
        # fee field left empty/invalid; previously crashed on `None - fee`
        self.show_error(_('Fee not set'))
        return
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add an offline transaction ``tx`` to the wallet history.

    Returns True on success; False when the transaction conflicts with
    existing history or cannot be added.
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        # show_error expects message text; pass str(e), not the exception object.
        win.show_error(str(e))
        return False
    else:
        self.wallet.save_transactions(write=True)
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
        return True
|
installwizard.py |
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_cadex.wallet import Wallet
from electrum_cadex.storage import WalletStorage
from electrum_cadex.util import UserCancelled, InvalidPassword
from electrum_cadex.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_cadex.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
# Prompt shown when choosing a password for a new software wallet.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
    + _("Leave this field empty if you want to disable encryption.")
# Prompt shown when offering file encryption for a hardware-wallet file.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
    + _("Your wallet file does not contain secrets, mostly just metadata. ") \
    + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
    + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text for WIF private-key entry, with an example key prefix.
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:XERBBcaPf5D5... \t-> XhGqfhnL...\n')
# note: full key is XERBBcaPf5D5oFXTEP7TdPWLem5ktc2Zr3AhhQhHVQaF49fDP6tN
# Warning about irregular whitespace in passphrases (upstream Electrum issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
    + _("You have multiple consecutive whitespaces or leading/trailing "
        "whitespaces in your passphrase.") + " " \
    + _("This is discouraged.") + " " \
    + _("Due to a bug, old versions of Electrum will NOT be creating the "
        "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig configuration.

    The pie is split into n slices, one per cosigner; the first m slices
    are drawn green (required signatures), the rest grey.
    """
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(4, 4, self.size - 8, self.size - 8)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n

    def set_n(self, n):
        """Update the total number of cosigners and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Update the number of required signatures and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        background = self.palette().color(QPalette.Background)
        painter = QPainter()
        painter.begin(self)
        painter.setPen(QPen(background, 8, Qt.SolidLine))
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(Qt.gray)
        # drawPie angles are expressed in 1/16th of a degree.
        slice_span = int(16 * 360 * 1 / self.n)
        for index in range(self.n):
            start_angle = int(16 * 360 * index / self.n)
            painter.setBrush(Qt.green if index < self.m else Qt.gray)
            painter.drawPie(self.R, start_angle, slice_span)
        painter.end()
def wizard_dialog(func):
    """Decorator for InstallWizard dialog methods.

    Sets the back button label, runs the wrapped dialog, and forwards its
    result to the ``run_next`` keyword callback.  GoBack navigates back
    (or closes the wizard at the first page); UserCancelled aborts the
    step silently.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        # Normalise the dialog result to a tuple of run_next arguments.
        if type(out) is not tuple:
            out = (out,)
        run_next(*out)
    return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt wizard guiding wallet creation, restore and opening."""

    # Emitted by terminate(); connected to self.accept in __init__.
    accept_signal = pyqtSignal()
def __init__(self, config, app, plugins, storage):
    """Build the wizard window: title, logo, scrollable content area and
    Back/Next buttons, all driven by a private QEventLoop.

    Loop exit codes: 0 = dialog rejected, 1 = back, 2 = next
    (consumed by exec_layout and run_and_get_wallet).
    """
    BaseWizard.__init__(self, config, plugins, storage)
    QDialog.__init__(self, None)
    self.setWindowTitle('Cadex Electrum - ' + _('Install Wizard'))
    self.app = app
    self.config = config
    # Set for the base class
    self.language_for_seed = config.get('language')
    self.setMinimumSize(600, 400)
    self.accept_signal.connect(self.accept)
    self.title = QLabel()
    self.main_widget = QWidget()
    self.back_button = QPushButton(_("Back"), self)
    self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
    self.next_button = QPushButton(_("Next"), self)
    self.next_button.setDefault(True)
    self.logo = QLabel()
    self.please_wait = QLabel(_("Please wait..."))
    self.please_wait.setAlignment(Qt.AlignCenter)
    self.icon_filename = None
    # Private event loop exited by the buttons; see exit codes above.
    self.loop = QEventLoop()
    self.rejected.connect(lambda: self.loop.exit(0))
    self.back_button.clicked.connect(lambda: self.loop.exit(1))
    self.next_button.clicked.connect(lambda: self.loop.exit(2))
    outer_vbox = QVBoxLayout(self)
    inner_vbox = QVBoxLayout()
    inner_vbox.addWidget(self.title)
    inner_vbox.addWidget(self.main_widget)
    inner_vbox.addStretch(1)
    inner_vbox.addWidget(self.please_wait)
    inner_vbox.addStretch(1)
    # Wrap the content in a scroll area so long layouts remain usable.
    scroll_widget = QWidget()
    scroll_widget.setLayout(inner_vbox)
    scroll = QScrollArea()
    scroll.setWidget(scroll_widget)
    scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    scroll.setWidgetResizable(True)
    icon_vbox = QVBoxLayout()
    icon_vbox.addWidget(self.logo)
    icon_vbox.addStretch(1)
    hbox = QHBoxLayout()
    hbox.addLayout(icon_vbox)
    hbox.addSpacing(5)
    hbox.addWidget(scroll)
    hbox.setStretchFactor(scroll, 1)
    outer_vbox.addLayout(hbox)
    outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
    self.set_icon(':icons/electrum-cadex.png')
    self.show()
    self.raise_()
    self.refresh_gui()  # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self, get_wallet_from_daemon):
    """Wallet selection page: pick/create a wallet file and open it.

    Shows a filename field (with live validation via on_filename), an
    optional password field for encrypted files, then loops on the
    private event loop until a wallet can be opened, decrypted or
    created.  Returns the opened Wallet, a wallet already in memory, or
    None when the user cancels.
    """
    vbox = QVBoxLayout()
    hbox = QHBoxLayout()
    hbox.addWidget(QLabel(_('Wallet') + ':'))
    self.name_e = QLineEdit()
    hbox.addWidget(self.name_e)
    button = QPushButton(_('Choose...'))
    hbox.addWidget(button)
    vbox.addLayout(hbox)
    self.msg_label = QLabel('')
    vbox.addWidget(self.msg_label)
    hbox2 = QHBoxLayout()
    self.pw_e = QLineEdit('', self)
    self.pw_e.setFixedWidth(150)
    self.pw_e.setEchoMode(2)  # QLineEdit.Password
    self.pw_label = QLabel(_('Password') + ':')
    hbox2.addWidget(self.pw_label)
    hbox2.addWidget(self.pw_e)
    hbox2.addStretch()
    vbox.addLayout(hbox2)
    self.set_layout(vbox, title=_('Cadex Electrum wallet'))
    wallet_folder = os.path.dirname(self.storage.path)

    def on_choose():
        # File picker; the textChanged signal then triggers on_filename.
        path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if path:
            self.name_e.setText(path)

    def on_filename(filename):
        # Validate the chosen path, update self.storage, the message label
        # and the password field visibility accordingly.
        path = os.path.join(wallet_folder, filename)
        wallet_from_memory = get_wallet_from_daemon(path)
        try:
            if wallet_from_memory:
                self.storage = wallet_from_memory.storage
            else:
                self.storage = WalletStorage(path, manual_upgrades=True)
            self.next_button.setEnabled(True)
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            self.storage = None
            self.next_button.setEnabled(False)
        if self.storage:
            if not self.storage.file_exists():
                msg = _("This file does not exist.") + '\n' \
                      + _("Press 'Next' to create this wallet, or choose another file.")
                pw = False
            elif not wallet_from_memory:
                if self.storage.is_encrypted_with_user_pw():
                    msg = _("This file is encrypted with a password.") + '\n' \
                          + _('Enter your password or choose another file.')
                    pw = True
                elif self.storage.is_encrypted_with_hw_device():
                    msg = _("This file is encrypted using a hardware device.") + '\n' \
                          + _("Press 'Next' to choose device to decrypt.")
                    pw = False
                else:
                    msg = _("Press 'Next' to open this wallet.")
                    pw = False
            else:
                msg = _("This file is already open in memory.") + "\n" \
                      + _("Press 'Next' to create/focus window.")
                pw = False
        else:
            msg = _('Cannot read file')
            pw = False
        self.msg_label.setText(msg)
        if pw:
            self.pw_label.show()
            self.pw_e.show()
            self.pw_e.setFocus()
        else:
            self.pw_label.hide()
            self.pw_e.hide()

    button.clicked.connect(on_choose)
    self.name_e.textChanged.connect(on_filename)
    # Seed the field with the current storage path; triggers on_filename.
    n = os.path.basename(self.storage.path)
    self.name_e.setText(n)
    while True:
        if self.loop.exec_() != 2:  # 2 = next
            return
        if self.storage.file_exists() and not self.storage.is_encrypted():
            break
        if not self.storage.file_exists():
            break
        wallet_from_memory = get_wallet_from_daemon(self.storage.path)
        if wallet_from_memory:
            return wallet_from_memory
        if self.storage.file_exists() and self.storage.is_encrypted():
            if self.storage.is_encrypted_with_user_pw():
                password = self.pw_e.text()
                try:
                    self.storage.decrypt(password)
                    break
                except InvalidPassword as e:
                    QMessageBox.information(None, _('Error'), str(e))
                    continue
                except BaseException as e:
                    traceback.print_exc(file=sys.stdout)
                    QMessageBox.information(None, _('Error'), str(e))
                    return
            elif self.storage.is_encrypted_with_hw_device():
                try:
                    self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
                except InvalidPassword as e:
                    QMessageBox.information(
                        None, _('Error'),
                        _('Failed to decrypt using this hardware device.') + '\n' +
                        _('If you use a passphrase, make sure it is correct.'))
                    # Restart this page from scratch after a failed decrypt.
                    self.stack = []
                    return self.run_and_get_wallet(get_wallet_from_daemon)
                except BaseException as e:
                    traceback.print_exc(file=sys.stdout)
                    QMessageBox.information(None, _('Error'), str(e))
                    return
                if self.storage.is_past_initial_decryption():
                    break
                else:
                    return
            else:
                raise Exception('Unexpected encryption version')
    path = self.storage.path
    if self.storage.requires_split():
        # Legacy multi-account wallet: offer to split it into separate files.
        self.hide()
        msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Cadex Electrum 2.7.\n\n"
                "Do you want to split your wallet into multiple files?").format(path)
        if not self.question(msg):
            return
        file_list = '\n'.join(self.storage.split_accounts())
        msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n' + _('Do you want to delete the old file') + ':\n' + path
        if self.question(msg):
            os.remove(path)
            self.show_warning(_('The file was removed'))
        return
    action = self.storage.get_action()
    if action and action not in ('new', 'upgrade_storage'):
        # Wallet creation was interrupted previously; offer to finish it.
        self.hide()
        msg = _("The file '{}' contains an incompletely created wallet.\n"
                "Do you want to complete its creation now?").format(path)
        if not self.question(msg):
            if self.question(_("Do you want to delete '{}'?").format(path)):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return
        self.show()
    if action:
        # self.wallet is set in run
        self.run(action)
        return self.wallet
    self.wallet = Wallet(self.storage)
    return self.wallet
def finished(self):
    """Called in hardware client wrapper, in order to close popups."""
    pass
def on_error(self, exc_info):
    """Report an unexpected exception to the user.

    UserCancelled is part of normal wizard flow and is ignored.
    """
    if isinstance(exc_info[1], UserCancelled):
        return
    traceback.print_exception(*exc_info)
    self.show_error(str(exc_info[1]))
def set_icon(self, filename):
    """Set the wizard logo pixmap; returns the previously set filename."""
    prior_filename, self.icon_filename = self.icon_filename, filename
    self.logo.setPixmap(QPixmap(filename).scaledToWidth(60, mode=Qt.SmoothTransformation))
    return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
    """Replace the wizard's central layout and reset button/visibility state."""
    self.title.setText("<b>%s</b>" % title if title else "")
    self.title.setVisible(bool(title))
    # Get rid of any prior layout by assigning it to a temporary widget
    prior_layout = self.main_widget.layout()
    if prior_layout:
        QWidget().setLayout(prior_layout)
    self.main_widget.setLayout(layout)
    self.back_button.setEnabled(True)
    self.next_button.setEnabled(next_enabled)
    if next_enabled:
        self.next_button.setFocus()
    self.main_widget.setVisible(True)
    self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
                next_enabled=True):
    """Show ``layout`` and block in the private event loop until the user acts.

    Raises UserCancelled when the dialog is rejected (if
    ``raise_on_cancel``) and GoBack when the back button is pressed.
    Afterwards switches the window to the "Please wait" state.
    Returns the loop exit code (2 = next).
    """
    self.set_layout(layout, title, next_enabled)
    result = self.loop.exec_()
    if not result and raise_on_cancel:
        raise UserCancelled
    if result == 1:
        raise GoBack from None
    self.title.setVisible(False)
    self.back_button.setEnabled(False)
    self.next_button.setEnabled(False)
    self.main_widget.setVisible(False)
    self.please_wait.setVisible(True)
    self.refresh_gui()
    return result
def refresh_gui(self):
    """Pump the Qt event queue so pending GUI updates are processed."""
    # For some reason, to refresh the GUI this needs to be called twice
    for _unused in range(2):
        self.app.processEvents()
def remove_from_recently_open(self, filename):
    """Drop ``filename`` from the config's recently-opened wallets list."""
    self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
    """Show a key-entry layout (KeysLayout) and return the entered text."""
    slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                         allow_multi=allow_multi)
    self.exec_layout(slayout, title, next_enabled=False)
    return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
    """Show a seed-entry layout; returns (seed, is_bip39, is_ext)."""
    slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
    self.exec_layout(slayout, title, next_enabled=False)
    return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
    """Ask the user for a master key; optionally show WIF entry help."""
    header_layout = QHBoxLayout()
    label = WWLabel(message)
    label.setMinimumWidth(400)
    header_layout.addWidget(label)
    if show_wif_help:
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
    """Prompt for cosigner number ``index``'s xpub (or xprv) key."""
    title = "{} {:d}".format(_("Add Cosigner"), index)
    message = (_('Please enter the master public key (xpub) of your cosigner.')
               + ' '
               + _('Enter their master private key (xprv) if you want to be able to sign for them.'))
    return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
    """Ask for the seed phrase to restore a wallet.

    Returns (seed, is_bip39, is_ext) from seed_input.
    """
    # Enable the 'ext'/'bip39' checkboxes according to the wizard options.
    options = [name for enabled, name in
               ((self.opt_ext, 'ext'), (self.opt_bip39, 'bip39')) if enabled]
    return self.seed_input(
        _('Enter Seed'),
        _('Please enter your seed phrase in order to restore your wallet.'),
        test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
    """Make the user retype the seed to prove it was saved; returns the seed."""
    self.app.clipboard().clear()  # prevent pasting the seed back
    parts = [
        _('Your seed is important!'),
        _('If you lose your seed, your money will be permanently lost.'),
        _('To make sure that you have properly saved your seed, please retype it here.'),
    ]
    seed, unused_is_bip39, unused_is_ext = self.seed_input(
        _('Confirm Seed'), ' '.join(parts), test, None)
    return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
    """Display the newly generated seed; returns whether 'ext' was selected."""
    title = _("Your wallet generation seed is:")
    slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
    self.exec_layout(slayout)
    return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
    """Run a password layout; returns (new_password, encrypt_wallet_flag)."""
    playout = PasswordLayout(None, msg, kind, self.next_button,
                             force_disable_encrypt_cb=force_disable_encrypt_cb)
    # Encryption is opt-out: checkbox starts checked.
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
    """Request the user enter a new password and confirm it.

    Returns (password_or_None, encrypt_wallet_flag) via pw_layout.
    """
    return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
    """Ask whether the hardware-wallet storage file should be encrypted."""
    playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
    """Simple confirmation step: show ``message`` and wait for Next."""
    self.confirm(message, title)
def confirm(self, message, title):
    """Display ``message`` in a word-wrapped label and wait for the user."""
    body = QVBoxLayout()
    body.addWidget(WWLabel(message))
    self.exec_layout(body, title)
@wizard_dialog
def action_dialog(self, action, run_next):
    """Run a named BaseWizard action as a wizard step."""
    self.run(action)
def terminate(self):
    """Close the wizard by emitting accept_signal (connected to accept)."""
    self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
    """Run ``task`` in a background thread while displaying ``msg``.

    Keeps the GUI responsive by polling the worker at ~60 Hz and pumping
    the Qt event queue; calls ``on_finished`` once the task completes.
    """
    label = WWLabel(msg)
    vbox = QVBoxLayout()
    vbox.addSpacing(100)
    label.setMinimumWidth(300)
    label.setAlignment(Qt.AlignCenter)
    vbox.addWidget(label)
    self.set_layout(vbox, next_enabled=False)
    self.back_button.setEnabled(False)
    t = threading.Thread(target=task)
    t.start()
    while True:
        t.join(1.0 / 60)
        if t.is_alive():
            self.refresh_gui()
        else:
            break
    if on_finished:
        on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
    """Present ``choices`` ((value, label) pairs); returns the selected value."""
    values = [choice[0] for choice in choices]
    labels = [choice[1] for choice in choices]
    clayout = ChoicesLayout(message, labels)
    body = QVBoxLayout()
    body.addLayout(clayout.layout())
    self.exec_layout(body, title)
    selected = clayout.selected_index()
    return values[selected]
def query_choice(self, msg, choices):
    """Ask the user to pick one of ``choices``; called by hardware wallets.

    Returns the selected index.
    """
    selection_layout = ChoicesLayout(msg, choices)
    wrapper = QVBoxLayout()
    wrapper.addLayout(selection_layout.layout())
    self.exec_layout(wrapper, '')
    return selection_layout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title, message1, choices, message2,
                           test_text, run_next) -> (str, str):
    """Combined choice + text-entry dialog.

    ``choices`` is a list of (value, label, default_text) triples; picking
    a choice pre-fills the line edit with that choice's default text.
    Returns (entered_text, chosen_value).
    """
    vbox = QVBoxLayout()
    c_values = [x[0] for x in choices]
    c_titles = [x[1] for x in choices]
    c_default_text = [x[2] for x in choices]

    def on_choice_click(clayout):
        idx = clayout.selected_index()
        line.setText(c_default_text[idx])

    clayout = ChoicesLayout(message1, c_titles, on_choice_click)
    vbox.addLayout(clayout.layout())
    vbox.addSpacing(50)
    vbox.addWidget(WWLabel(message2))
    line = QLineEdit()

    def on_text_change(text):
        # Gate the Next button on the validator.
        self.next_button.setEnabled(test_text(text))

    line.textEdited.connect(on_text_change)
    on_choice_click(clayout)  # set default text for "line"
    vbox.addWidget(line)
    self.exec_layout(vbox, title)
    choice = c_values[clayout.selected_index()]
    return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
                presets=(), warn_issue4566=False):
    """Generic single-line text input dialog.

    ``test`` validates the text and gates the Next button. ``presets``
    are (label, text) pairs offered as quick-fill buttons.
    ``warn_issue4566`` shows the passphrase-whitespace warning when the
    input contains irregular whitespace.  Returns the entered text.
    """
    vbox = QVBoxLayout()
    vbox.addWidget(WWLabel(message))
    line = QLineEdit()
    line.setText(default)

    def f(text):
        self.next_button.setEnabled(test(text))
        if warn_issue4566:
            text_whitespace_normalised = ' '.join(text.split())
            warn_issue4566_label.setVisible(text != text_whitespace_normalised)

    line.textEdited.connect(f)
    vbox.addWidget(line)
    vbox.addWidget(WWLabel(warning))
    warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
    warn_issue4566_label.setVisible(False)
    vbox.addWidget(warn_issue4566_label)
    for preset in presets:
        button = QPushButton(preset[0])
        # Bind the preset text as a default arg to avoid the late-binding
        # closure pitfall inside this loop.
        button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
        button.setMinimumWidth(150)
        hbox = QHBoxLayout()
        hbox.addWidget(button, alignment=Qt.AlignCenter)
        vbox.addLayout(hbox)
    self.exec_layout(vbox, title, next_enabled=test(default))
    return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
    """Display the wallet's master public key for sharing with cosigners."""
    msg = ' '.join([
        _("Here is your master public key."),
        _("Please share it with your cosigners.")
    ])
    vbox = QVBoxLayout()
    layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
    vbox.addLayout(layout.layout())
    self.exec_layout(vbox, _('Master Public Key'))
    return None
def init_network(self, network):
    """First-run network setup: auto-connect or manual server selection."""
    message = _("cadex Electrum communicates with remote servers to get "
                "information about your transactions and addresses. The "
                "servers all fulfill the same purpose only differing in "
                "hardware. In most cases you simply want to let cadex Electrum "
                "pick one at random. However if you prefer feel free to "
                "select a server manually.")
    choices = [_("Auto connect"), _("Select server manually")]
    title = _("How do you want to connect to a server? ")
    clayout = ChoicesLayout(message, choices)
    self.back_button.setText(_('Cancel'))
    self.exec_layout(clayout.layout(), title)
    r = clayout.selected_index()
    if r == 1:
        # Manual selection: show the full network configuration dialog.
        nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
        if self.exec_layout(nlayout.layout()):
            nlayout.accept()
    else:
        network.auto_connect = True
        self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
    """Let the user pick m-of-n multisig parameters with two sliders.

    Returns (m, n): m = required signatures, n = total cosigners.
    """
    cw = CosignWidget(2, 2)
    m_edit = QSlider(Qt.Horizontal, self)
    n_edit = QSlider(Qt.Horizontal, self)
    n_edit.setMinimum(2)
    n_edit.setMaximum(15)
    m_edit.setMinimum(1)
    m_edit.setMaximum(2)
    n_edit.setValue(2)
    m_edit.setValue(2)
    n_label = QLabel()
    m_label = QLabel()
    grid = QGridLayout()
    grid.addWidget(n_label, 0, 0)
    grid.addWidget(n_edit, 0, 1)
    grid.addWidget(m_label, 1, 0)
    grid.addWidget(m_edit, 1, 1)

    def on_m(m):
        m_label.setText(_('Require {0} signatures').format(m))
        cw.set_m(m)

    def on_n(n):
        n_label.setText(_('From {0} cosigners').format(n))
        cw.set_n(n)
        # m can never exceed the number of cosigners.
        m_edit.setMaximum(n)

    n_edit.valueChanged.connect(on_n)
    m_edit.valueChanged.connect(on_m)
    on_n(2)
    on_m(2)
    vbox = QVBoxLayout()
    vbox.addWidget(cw)
    vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
    vbox.addLayout(grid)
    self.exec_layout(vbox, _("Multi-Signature Wallet"))
    m = int(m_edit.value())
    n = int(n_edit.value())
    return (m, n)
|
updating_server.py | #!/usr/bin/env python
"""
Pymodbus Server With Updating Thread
--------------------------------------------------------------------------
This is an example of having a background thread updating the
context while the server is operating. This can also be done with
a python thread::
from threading import Thread
thread = Thread(target=updating_writer, args=(context,))
thread.start()
"""
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
# --------------------------------------------------------------------------- #
# import the twisted libraries we need
# --------------------------------------------------------------------------- #
from twisted.internet.task import LoopingCall
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# define your callback process
# --------------------------------------------------------------------------- #
def update_read(a):
    """Read 20 values from the server context and log them (read-only probe).

    :param a: one-tuple containing the ModbusServerContext
    """
    log.debug("updating the context")
    context = a[0]
    function_code, slave_id, start_address = 0x6, 0x00, 0x0
    values = context[slave_id].getValues(function_code, start_address, 20)
    log.debug("new value: {0};".format(values))
def update_reads(a):
    """Read ten register values, increment them locally, and log the result.

    Does not write anything back to the context.
    """
    log.debug("updating the context")
    context = a[0]
    register, slave_id, address = 1, 0x00, 0x1
    current = context[slave_id].getValues(register, address, count=10)
    incremented = [value + 1 for value in current]
    log.debug("new values: " + str(incremented))
def updating_writer_(a):
    """ A worker process that runs every so often and
    updates live values of the context for slaves 0-2. It should be noted
    that there is a race condition for the update.
    :param a: one-tuple containing the server context
    """
    log.debug("updating the context")
    context = a[0]
    register = 1
    address = 0x1
    for slave in range(3):
        current = context[slave].getValues(register, address, count=10)
        incremented = [value + 1 for value in current]
        log.debug("new values: " + str(incremented))
        context[slave].setValues(register, address, incremented)
def updating_writer(a):
    """ A worker process that runs every so often and
    updates live values of the context. It should be noted
    that there is a race condition for the update.
    :param a: one-tuple containing the server context
    """
    log.debug("updating the context")
    context = a[0]
    register, slave_id, address = 1, 0x00, 0x1
    current = context[slave_id].getValues(register, address, count=10)
    updated = [value + 1 for value in current]
    log.debug("new values: " + str(updated))
    context[slave_id].setValues(register, address, updated)
def run_updating_server():
    """Start a Modbus TCP server on localhost:5020 with a periodic reader.

    A twisted LoopingCall invokes update_read every few seconds while the
    server runs.
    """
    # ----------------------------------------------------------------------- #
    # initialize your data store
    # ----------------------------------------------------------------------- #
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(0, [0]*100),
        co=ModbusSequentialDataBlock(0, [0]*100),
        hr=ModbusSequentialDataBlock(0, [0]*100),
        ir=ModbusSequentialDataBlock(0, [0]*100))
    context = ModbusServerContext(slaves=store, single=True)
    # ----------------------------------------------------------------------- #
    # initialize the server information
    # ----------------------------------------------------------------------- #
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
    identity.ProductName = 'pymodbus Server'
    identity.ModelName = 'pymodbus Server'
    identity.MajorMinorRevision = '2.3.0'
    # ----------------------------------------------------------------------- #
    # run the server you want
    # ----------------------------------------------------------------------- #
    time = 3  # update interval in seconds (previous comment wrongly said 5)
    loop = LoopingCall(f=update_read, a=(context,))
    loop.start(time, now=False)  # initially delay by time
    StartTcpServer(context, identity=identity, address=("localhost", 5020))
# Script entry point: start the server with its periodic updater.
if __name__ == "__main__":
    run_updating_server()
|
online.py | '''
Online tests
'''
import unittest
from unittest import TestCase
from mock import MagicMock
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from rest_service import RestService
import time
import requests
from threading import Thread
class TestRestService(TestCase):
    """Online integration tests for RestService (needs localsettings.py)."""

    # random port number for local connections
    port_number = 62976

    def setUp(self):
        """Boot the REST service in a daemon thread and wait until it answers."""
        self.rest_service = RestService("localsettings.py")
        self.rest_service.setup()
        self.rest_service.settings['FLASK_PORT'] = self.port_number

        def run_server():
            self.rest_service.run()

        self._server_thread = Thread(target=run_server)
        # setDaemon() is deprecated; assign the attribute instead.
        self._server_thread.daemon = True
        self._server_thread.start()
        # Poll until the service answers (up to 10 s) instead of a fixed
        # 10-second sleep; finishes as soon as the server is up.
        deadline = time.time() + 10
        while time.time() < deadline:
            try:
                requests.get('http://127.0.0.1:{p}'.format(p=self.port_number))
                break
            except requests.exceptions.ConnectionError:
                time.sleep(0.25)

    def test_status(self):
        """The status endpoint should report a healthy node."""
        r = requests.get('http://127.0.0.1:{p}'.format(p=self.port_number))
        results = r.json()
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(results['node_health'], 'GREEN')

    def tearDown(self):
        self.rest_service.close()
if __name__ == '__main__':
unittest.main() |
main.py | #!/usr/bin/env python
import os
from datetime import datetime, timezone, timedelta
import threading
import logging
import re
import requests
import boto3
# semaphore limit of 5, picked this number arbitrarily
maxthreads = 5
sema = threading.Semaphore(value=maxthreads)

# Silence the very chatty AWS SDK loggers; keep our root logger at DEBUG.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# Fetch the PD API token from PD_API_KEY_NAME key in SSM
# (runs at import time; module load fails if the parameter is missing).
PD_API_KEY = boto3.client('ssm').get_parameters(
    Names=[os.environ['PD_API_KEY_NAME']],
    WithDecryption=True)['Parameters'][0]['Value']
# Get the Current User on-call for a given schedule
def get_user(schedule_id):
    """Return the name of the user currently on call for ``schedule_id``.

    Returns the username (suffixed with " (Override)" when any override is
    active), a placeholder string when nobody is on call, or None when the
    schedule id is invalid.
    """
    headers = {
        'Accept': 'application/vnd.pagerduty+json;version=2',
        'Authorization': 'Token token={token}'.format(token=PD_API_KEY)
    }
    normal_url = 'https://api.pagerduty.com/schedules/{0}/users'.format(
        schedule_id
    )
    override_url = 'https://api.pagerduty.com/schedules/{0}/overrides'.format(
        schedule_id
    )
    # This value should be less than the running interval
    # It is best to use UTC for the datetime object
    now = datetime.now(timezone.utc)
    since = now - timedelta(minutes=1)  # One minute ago
    payload = {}
    payload['since'] = since.isoformat()
    payload['until'] = now.isoformat()
    normal = requests.get(normal_url, headers=headers, params=payload)
    if normal.status_code == 404:
        logger.critical("ABORT: Not a valid schedule: {}".format(schedule_id))
        # Return None (not False): callers test "is not None", so returning
        # False would wrongly be treated as a valid username.
        return None
    try:
        username = normal.json()['users'][0]['name']
        # Check for overrides
        # If there is *any* override, then the above username is an override
        # over the normal schedule. The problem must be approached this way
        # because the /overrides endpoint does not guarantee an order of the
        # output.
        override = requests.get(override_url, headers=headers, params=payload)
        if override.json()['overrides']:  # is not empty list
            username = username + " (Override)"
    except IndexError:
        username = "No One :thisisfine:"
    logger.info("Currently on call: {}".format(username))
    return username
def get_pd_schedule_name(schedule_id):
    """Look up the human-readable name of a PagerDuty schedule, or None."""
    response = requests.get(
        'https://api.pagerduty.com/schedules/{0}'.format(schedule_id),
        headers={
            'Accept': 'application/vnd.pagerduty+json;version=2',
            'Authorization': 'Token token={token}'.format(token=PD_API_KEY)
        })
    try:
        return response.json()['schedule']['name']
    except KeyError:
        # Unexpected payload (bad id / auth failure): log what came back.
        logger.debug(response.status_code)
        logger.debug(response.json())
        return None
def get_slack_topic(channel):
    """Fetch the current topic of a Slack channel.

    Returns an empty string when the channel cannot be read (e.g. the
    bot was removed from the channel).
    """
    payload = {}
    payload['token'] = boto3.client('ssm').get_parameters(
        Names=[os.environ['SLACK_API_KEY_NAME']],
        WithDecryption=True)['Parameters'][0]['Value']
    payload['channel'] = channel
    # Default for the failure path; previously 'current' was left unbound on
    # KeyError, raising UnboundLocalError at the return statement.
    current = ""
    try:
        r = requests.post('https://slack.com/api/conversations.info', data=payload)
        current = r.json()['channel']['topic']['value']
        logger.debug("Current Topic: '{}'".format(current))
    except KeyError:
        logger.critical("Could not find '{}' on slack, has the on-call bot been removed from this channel?".format(channel))
    return current
def update_slack_topic(channel, proposed_update):
    """Set the Slack channel topic to ``proposed_update`` if it changed.

    Topics follow the convention "<on-call part> | <rest of topic>"; only
    the part before the delimiter is replaced, the rest is preserved.
    """
    logger.debug("Entered update_slack_topic() with: {} {}".format(
        channel,
        proposed_update)
    )
    payload = {}
    payload['token'] = boto3.client('ssm').get_parameters(
        Names=[os.environ['SLACK_API_KEY_NAME']],
        WithDecryption=True)['Parameters'][0]['Value']
    payload['channel'] = channel
    # This is tricky to get correct for all the edge cases
    # Because Slack adds a '<mailto:foo@example.com|foo@example.com>' behind the
    # scenes, we need to match the email address in the first capturing group,
    # then replace the rest of the string with the address
    # None of this is really ideal because we lose the "linking" aspect in the
    # Slack Topic.
    current_full_topic = re.sub(r'<mailto:([a-zA-Z@.]*)(?:[|a-zA-Z@.]*)>',
                                r'\1', get_slack_topic(channel))
    # Also handle Slack "Subteams" in the same way as above
    current_full_topic = re.sub(r'<(?:!subteam\^[A-Z0-9|]*)([@A-Za-z-]*)>', r'\1',
                                current_full_topic)
    # Also handle Slack Channels in the same way as above
    current_full_topic = re.sub(r'<(?:#[A-Z0-9|]*)([@A-Za-z-]*)>', r'#\1',
                                current_full_topic)
    if current_full_topic:
        # This should match every case EXCEPT when onboarding a channel and it
        # already has a '|' in it. Workaround: Fix topic again and it will be
        # correct in the future
        current_full_topic_delimit_count = current_full_topic.count('|')
        c_delimit_count = current_full_topic_delimit_count - 1
        if c_delimit_count < 1:
            c_delimit_count = 1
        # This rsplit is fragile too!
        # The original intent was to preserve a '|' in the schedule name but
        # that means multiple pipes in the topic do not work...
        try:
            first_part = current_full_topic.rsplit('|', c_delimit_count)[0].strip()
            second_part = current_full_topic.replace(first_part + " |", "").strip()
        except IndexError:  # if there is no '|' in the topic
            first_part = "none"
            second_part = current_full_topic
    else:
        first_part = "none"
        second_part = "."  # if there is no topic, just add something
    if proposed_update != first_part:
        # slack limits topic to 250 chars
        topic = "{} | {}".format(proposed_update, second_part)
        if len(topic) > 250:
            topic = topic[0:247] + "..."
        payload['topic'] = topic
        r = requests.post('https://slack.com/api/conversations.setTopic', data=payload)
        logger.debug("Response for '{}' was: {}".format(channel, r.json()))
    else:
        logger.info("Not updating slack, topic is the same")
    return None
def figure_out_schedule(s):
    """Resolve ``s`` to a PagerDuty schedule id.

    Anything already shaped like an id ("P" followed by six alphanumerics)
    is returned unchanged; otherwise the schedules API is fuzzy-searched.
    Returns None when nothing matches.
    """
    # fingers crossed that this regex holds for awhile. "PXXXXXX"
    if re.match('^P[a-zA-Z0-9]{6}', s):
        return s
    headers = {
        'Accept': 'application/vnd.pagerduty+json;version=2',
        'Authorization': 'Token token={token}'.format(token=PD_API_KEY)
    }
    # If there is no override, then check the schedule directly
    response = requests.get('https://api.pagerduty.com/schedules/',
                            headers=headers, params={'query': s})
    try:
        # This is fragile. fuzzy search may not do what you want
        return response.json()['schedules'][0]['id']
    except IndexError:
        logger.debug("Schedule Not Found for: {}".format(s))
        return None
def do_work(obj):
    """Thread entrypoint: update chat topics for one DynamoDB config item.

    :param obj: DynamoDB item (attribute-value dicts); 'schedule' is the
        table's primary key and is always present.
    :return: 127 on a schedule lookup failure, otherwise None.
    """
    sema.acquire()
    try:
        logger.debug("Operating on {}".format(obj))
        # schedule will ALWAYS be there, it is a ddb primarykey
        schedule = figure_out_schedule(obj['schedule']['S'])
        if schedule:
            username = get_user(schedule)
        else:
            logger.critical("Exiting: Schedule not found or not valid, see previous errors")
            return 127
        try:
            sched_name = obj['sched_name']['S']
        except KeyError:  # optional friendly name; fall back to the PD name
            sched_name = get_pd_schedule_name(schedule)
        if username is not None:  # then it is valid and update the chat topic
            topic = "{} is on-call for {}".format(
                username,
                sched_name
            )
            if 'slack' in obj:
                slack = obj['slack']['S']
                # 'slack' may contain multiple channels seperated by whitespace
                for channel in slack.split():
                    update_slack_topic(channel, topic)
            elif 'hipchat' in obj:
                # hipchat = obj['hipchat']['S']
                logger.critical("HipChat is not supported yet. Ignoring this entry...")
    finally:
        # Fix: always release the semaphore, even on the early return above,
        # otherwise repeated failures exhaust the pool and deadlock the workers.
        sema.release()
def handler(event, context):
    """AWS Lambda entrypoint: scan the config table and process every item.

    One worker thread is started per item; do_work() throttles actual
    concurrency through the module-level semaphore.
    """
    print(event)
    ddb = boto3.client('dynamodb')
    # NOTE(review): scan() returns at most one page (1 MB); items beyond
    # LastEvaluatedKey are silently skipped -- confirm the table stays small.
    response = ddb.scan(TableName=os.environ['CONFIG_TABLE'])
    threads = [threading.Thread(target=do_work, args=(item,)) for item in response['Items']]
    # Plain loops, not throwaway list comprehensions, for side effects.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
|
plot.py | import multiprocessing as mp
from copy import copy
import numpy as np
import tkinter
import pickle
import os
from itertools import accumulate
from matplotlib import pyplot as plt, lines
from casadi import Callback, nlpsol_out, nlpsol_n_out, Sparsity
from ..misc.data import Data
from ..misc.enums import PlotType, ControlType, InterpolationType
from ..misc.mapping import Mapping
from ..misc.utils import check_version
class CustomPlot:
    """Descriptor for one user-defined plot: what to compute and how to draw it."""

    def __init__(
        self,
        update_function,
        plot_type=PlotType.PLOT,
        axes_idx=None,
        legend=(),
        combine_to=None,
        color=None,
        linestyle=None,
        ylim=None,
        bounds=None,
    ):
        """
        Initializes the plot.
        :param update_function: Function computing the values to plot.
        :param plot_type: Type of plot. (PLOT = 0, INTEGRATED = 1 or STEP = 2)
        :param axes_idx: Index of the axis to be mapped (tuple/list or Mapping); None = set later.
        :param legend: Legend of the graphs (sequence of strings).
        :param combine_to: Name of an existing plot in which to add this graph.
        :param color: matplotlib color of the graphs.
        :param linestyle: matplotlib linestyle (used by STEP plots).
        :param ylim: Fixed y-axis limits; when set, autoscaling is skipped.
        :param bounds: Bounds object used to draw/scale min-max envelopes.
        """
        self.function = update_function
        self.type = plot_type
        if axes_idx is None:
            self.phase_mappings = None  # Will be set later
        elif isinstance(axes_idx, (tuple, list)):
            self.phase_mappings = Mapping(axes_idx)
        elif isinstance(axes_idx, Mapping):
            self.phase_mappings = axes_idx
        else:
            raise RuntimeError("phase_mapping must be a list or a Mapping")
        self.legend = legend
        self.combine_to = combine_to
        self.color = color
        self.linestyle = linestyle
        self.ylim = ylim
        self.bounds = bounds
class PlotOcp:
    """Builds and live-updates the matplotlib figures of an OCP solution.

    One figure is created per plotted variable (unless plots are combined),
    with one subplot per mapped component; update_data() refreshes every
    line from a new solver vector V.
    """

    def __init__(self, ocp, automatically_organize=True, adapt_graph_size_to_bounds=False):
        """Prepares the figure.

        :param ocp: The optimal control program to plot.
        :param automatically_organize: Whether to tile the windows on screen.
        :param adapt_graph_size_to_bounds: Whether y-limits follow the variable bounds.
        """
        # Multi-phase plotting assumes every phase shares the same q size.
        for i in range(1, ocp.nb_phases):
            if ocp.nlp[0].shape["q"] != ocp.nlp[i].shape["q"]:
                raise RuntimeError("Graphs with nbQ different at each phase is not implemented yet")
        self.ocp = ocp
        # Centralized matplotlib styling for every artist drawn below.
        self.plot_options = {
            "general_options": {"use_tight_layout": False},
            "non_integrated_plots": {"linestyle": "-.", "markersize": 3},
            "integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1},
            "bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"},
            "grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
            "vertical_lines": {"color": "k", "linestyle": "--", "linewidth": 1.2},
        }
        self.ydata = []
        self.ns = 0
        self.t = []
        self.t_integrated = []
        if isinstance(self.ocp.initial_phase_time, (int, float)):
            self.tf = [self.ocp.initial_phase_time]
        else:
            self.tf = list(self.ocp.initial_phase_time)
        # Indices of phases whose final time is itself an optimization variable.
        self.t_idx_to_optimize = []
        for i, nlp in enumerate(self.ocp.nlp):
            if isinstance(nlp.tf, self.ocp.CX):
                self.t_idx_to_optimize.append(i)
        self.__update_time_vector()
        self.axes = {}
        self.plots = []
        self.plots_vertical_lines = []
        self.plots_bounds = []
        self.all_figures = []
        self.automatically_organize = automatically_organize
        self._organize_windows(len(self.ocp.nlp[0].var_states) + len(self.ocp.nlp[0].var_controls))
        self.plot_func = {}
        self.variable_sizes = []
        self.adapt_graph_size_to_bounds = adapt_graph_size_to_bounds
        self.__create_plots()
        # Tile the windows (best effort; some backends have no movable window).
        horz = 0
        vert = 1 if len(self.all_figures) < self.nb_vertical_windows * self.nb_horizontal_windows else 0
        for i, fig in enumerate(self.all_figures):
            if self.automatically_organize:
                try:
                    fig.canvas.manager.window.move(
                        int(vert * self.width_step), int(self.top_margin + horz * self.height_step)
                    )
                    vert += 1
                    if vert >= self.nb_vertical_windows:
                        horz += 1
                        vert = 0
                except AttributeError:
                    # Backend without a movable window: skip the placement.
                    pass
            fig.canvas.draw()
            if self.plot_options["general_options"]["use_tight_layout"]:
                fig.tight_layout()

    def __update_time_vector(self):
        """Sets x-axis arrays: per-phase node times and per-interval integrated times."""
        self.t = []
        self.t_integrated = []
        last_t = 0
        for phase_idx, nlp in enumerate(self.ocp.nlp):
            nb_int_steps = nlp.nb_integration_steps
            dt_ns = self.tf[phase_idx] / nlp.ns
            time_phase_integrated = []
            last_t_int = copy(last_t)
            for _ in range(nlp.ns):
                time_phase_integrated.append(np.linspace(last_t_int, last_t_int + dt_ns, nb_int_steps + 1))
                last_t_int += dt_ns
            self.t_integrated.append(time_phase_integrated)
            # NOTE(review): self.ns accumulates on every call (it is never reset
            # here, and this method is re-invoked from __update_xdata) -- confirm
            # nothing relies on it as a per-call node count.
            self.ns += nlp.ns + 1
            time_phase = np.linspace(last_t, last_t + self.tf[phase_idx], nlp.ns + 1)
            last_t += self.tf[phase_idx]
            self.t.append(time_phase)

    def __create_plots(self):
        """Creates every axis and (empty) line artist for all declared plots."""
        # First pass: compute the number of components of each plotted variable,
        # probing the plot function with zeros when no mapping was supplied.
        variable_sizes = []
        for i, nlp in enumerate(self.ocp.nlp):
            variable_sizes.append({})
            if nlp.plot:
                for key in nlp.plot:
                    if isinstance(nlp.plot[key], tuple):
                        nlp.plot[key] = nlp.plot[key][0]
                    if nlp.plot[key].phase_mappings is None:
                        size = (
                            nlp.plot[key]
                            .function(np.zeros((nlp.nx, 1)), np.zeros((nlp.nu, 1)), np.zeros((nlp.np, 1)))
                            .shape[0]
                        )
                        nlp.plot[key].phase_mappings = Mapping(range(size))
                    else:
                        size = len(nlp.plot[key].phase_mappings.map_idx)
                    if key not in variable_sizes[i]:
                        variable_sizes[i][key] = size
                    else:
                        variable_sizes[i][key] = max(variable_sizes[i][key], size)
        self.variable_sizes = variable_sizes
        if not variable_sizes:
            # No graph was setup in problem_type
            return
        # Second pass: create (or reuse) the axes and the line artists.
        self.plot_func = {}
        for i, nlp in enumerate(self.ocp.nlp):
            for variable in self.variable_sizes[i]:
                if nlp.plot[variable].combine_to:
                    # Draw onto an already-created plot's axes.
                    self.axes[variable] = self.axes[nlp.plot[variable].combine_to]
                    axes = self.axes[variable][1]
                elif i > 0 and variable in self.axes:
                    # Later phases reuse the axes created for phase 0.
                    axes = self.axes[variable][1]
                else:
                    nb = max(
                        [
                            max(nlp.plot[variable].phase_mappings.map_idx) + 1 if variable in nlp.plot else 0
                            for nlp in self.ocp.nlp
                        ]
                    )
                    nb_cols, nb_rows = PlotOcp._generate_windows_size(nb)
                    axes = self.__add_new_axis(variable, nb, nb_rows, nb_cols)
                    self.axes[variable] = [nlp.plot[variable], axes]
                t = self.t[i]
                if variable not in self.plot_func:
                    self.plot_func[variable] = [None] * self.ocp.nb_phases
                self.plot_func[variable][i] = nlp.plot[variable]
                mapping = self.plot_func[variable][i].phase_mappings.map_idx
                for ctr, k in enumerate(mapping):
                    ax = axes[k]
                    if k < len(self.plot_func[variable][i].legend):
                        axes[k].set_title(self.plot_func[variable][i].legend[k])
                    ax.grid(**self.plot_options["grid"])
                    ax.set_xlim(0, self.t[-1][-1])
                    if nlp.plot[variable].ylim:
                        ax.set_ylim(nlp.plot[variable].ylim)
                    elif self.adapt_graph_size_to_bounds and nlp.plot[variable].bounds:
                        # Scale the axis to the declared bounds rather than the data.
                        if nlp.plot[variable].bounds.type != InterpolationType.CUSTOM:
                            y_min = nlp.plot[variable].bounds.min[ctr].min()
                            y_max = nlp.plot[variable].bounds.max[ctr].max()
                        else:
                            nlp.plot[variable].bounds.check_and_adjust_dimensions(len(mapping), nlp.ns)
                            y_min = min([nlp.plot[variable].bounds.min.evaluate_at(j)[k] for j in range(nlp.ns)])
                            y_max = max([nlp.plot[variable].bounds.max.evaluate_at(j)[k] for j in range(nlp.ns)])
                        y_range, _ = self.__compute_ylim(y_min, y_max, 1.25)
                        ax.set_ylim(y_range)
                    # Lines start as zeros; real data arrives via update_data().
                    zero = np.zeros((t.shape[0], 1))
                    plot_type = self.plot_func[variable][i].type
                    if plot_type == PlotType.PLOT:
                        color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:green"
                        self.plots.append(
                            [
                                plot_type,
                                i,
                                ax.plot(t, zero, color=color, zorder=0, **self.plot_options["non_integrated_plots"])[0],
                            ]
                        )
                    elif plot_type == PlotType.INTEGRATED:
                        color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:brown"
                        # One line artist per shooting interval.
                        plots_integrated = []
                        nb_int_steps = nlp.nb_integration_steps
                        for cmp in range(nlp.ns):
                            plots_integrated.append(
                                ax.plot(
                                    self.t_integrated[i][cmp],
                                    np.zeros(nb_int_steps + 1),
                                    color=color,
                                    **self.plot_options["integrated_plots"],
                                )[0]
                            )
                        self.plots.append([plot_type, i, plots_integrated])
                    elif plot_type == PlotType.STEP:
                        color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:orange"
                        linestyle = (
                            self.plot_func[variable][i].linestyle if self.plot_func[variable][i].linestyle else "-"
                        )
                        self.plots.append(
                            [plot_type, i, ax.step(t, zero, linestyle, where="post", color=color, zorder=0)[0]]
                        )
                    else:
                        raise RuntimeError(f"{plot_type} is not implemented yet")
                for j, ax in enumerate(axes):
                    # Dashed vertical lines at every phase transition.
                    intersections_time = self.find_phases_intersections()
                    for time in intersections_time:
                        self.plots_vertical_lines.append(ax.axvline(time, **self.plot_options["vertical_lines"]))
                    if nlp.plot[variable].bounds and self.adapt_graph_size_to_bounds:
                        if nlp.plot[variable].bounds.type == InterpolationType.EACH_FRAME:
                            ns = nlp.plot[variable].bounds.min.shape[1] - 1
                        else:
                            ns = nlp.ns
                        nlp.plot[variable].bounds.check_and_adjust_dimensions(nb_elements=len(mapping), nb_shooting=ns)
                        bounds_min = np.array([nlp.plot[variable].bounds.min.evaluate_at(k)[j] for k in range(ns + 1)])
                        bounds_max = np.array([nlp.plot[variable].bounds.max.evaluate_at(k)[j] for k in range(ns + 1)])
                        if bounds_min.shape[0] == nlp.ns:
                            # Pad with the last value so the step plot covers the final node.
                            bounds_min = np.concatenate((bounds_min, [bounds_min[-1]]))
                            bounds_max = np.concatenate((bounds_max, [bounds_max[-1]]))
                        self.plots_bounds.append(
                            [ax.step(self.t[i], bounds_min, where="post", **self.plot_options["bounds"]), i]
                        )
                        self.plots_bounds.append(
                            [ax.step(self.t[i], bounds_max, where="post", **self.plot_options["bounds"]), i]
                        )

    def __add_new_axis(self, variable, nb, nb_rows, nb_cols):
        """
        Sets the axis of the plots.
        :param variable: Variable to plot (used as the figure title).
        :param nb: Number of subplots actually needed. (integer)
        :param nb_rows: Number of rows of plots in subplots. (integer)
        :param nb_cols: Number of columns of plots in subplots. (integer)
        :return: axes: Axes of the plots. (list of matplotlib axes)
        """
        if self.automatically_organize:
            self.all_figures.append(plt.figure(variable, figsize=(self.width_step / 100, self.height_step / 131)))
        else:
            self.all_figures.append(plt.figure(variable))
        axes = self.all_figures[-1].subplots(nb_rows, nb_cols)
        if isinstance(axes, np.ndarray):
            axes = axes.flatten()
        else:
            axes = [axes]
        # Drop the unused trailing subplots of the grid.
        for i in range(nb, len(axes)):
            axes[i].remove()
        axes = axes[:nb]
        # Put the x-label on the bottom-middle subplot.
        idx_center = nb_rows * nb_cols - int(nb_cols / 2) - 1
        if idx_center >= len(axes):
            idx_center = len(axes) - 1
        axes[idx_center].set_xlabel("time (s)")
        self.all_figures[-1].tight_layout()
        return axes

    def _organize_windows(self, nb_windows):
        """
        Organizes esthetically the figure.
        :param nb_windows: Number of variables to plot. (integer)
        """
        self.nb_vertical_windows, self.nb_horizontal_windows = PlotOcp._generate_windows_size(nb_windows)
        if self.automatically_organize:
            height = tkinter.Tk().winfo_screenheight()
            width = tkinter.Tk().winfo_screenwidth()
            self.top_margin = height / 15
            self.height_step = (height - self.top_margin) / self.nb_horizontal_windows
            self.width_step = width / self.nb_vertical_windows
        else:
            self.top_margin = None
            self.height_step = None
            self.width_step = None

    def find_phases_intersections(self):
        """Finds the intersection between phases (cumulative phase end times, last excluded)."""
        return list(accumulate(self.tf))[:-1]

    @staticmethod
    def show():
        """Block and display all open matplotlib figures."""
        plt.show()

    def update_data(self, V):
        """Update of the variable V to plot (dependent axis).

        :param V: Solver vector of optimized states/controls/parameters.
        """
        self.ydata = []
        data_states, data_controls, data_param = Data.get_data(
            self.ocp, V, get_parameters=True, integrate=True, concatenate=False
        )
        data_param_in_dyn = np.array([data_param[key] for key in data_param if key != "time"]).squeeze()
        # Refresh phase durations (and hence the time axes) when time is optimized.
        for _ in self.ocp.nlp:
            if self.t_idx_to_optimize:
                for i_in_time, i_in_tf in enumerate(self.t_idx_to_optimize):
                    self.tf[i_in_tf] = data_param["time"][i_in_time]
            self.__update_xdata()
        data_states_per_phase, data_controls_per_phase = Data.get_data(self.ocp, V, integrate=True, concatenate=False)
        for i, nlp in enumerate(self.ocp.nlp):
            step_size = nlp.nb_integration_steps + 1
            nb_elements = nlp.ns * step_size + 1
            # Stack all state (resp. control) variables of this phase row-wise.
            state = np.ndarray((0, nb_elements))
            for s in nlp.var_states:
                if isinstance(data_states_per_phase[s], (list, tuple)):
                    state = np.concatenate((state, data_states_per_phase[s][i]))
                else:
                    state = np.concatenate((state, data_states_per_phase[s]))
            control = np.ndarray((0, nlp.ns + 1))
            for s in nlp.var_controls:
                if isinstance(data_controls_per_phase[s], (list, tuple)):
                    control = np.concatenate((control, data_controls_per_phase[s][i]))
                else:
                    control = np.concatenate((control, data_controls_per_phase[s]))
            # Number of control columns consumed per shooting interval.
            if nlp.control_type == ControlType.CONSTANT:
                u_mod = 1
            elif nlp.control_type == ControlType.LINEAR_CONTINUOUS:
                u_mod = 2
            else:
                raise NotImplementedError(f"Plotting {nlp.control_type} is not implemented yet")
            for key in self.variable_sizes[i]:
                if self.plot_func[key][i].type == PlotType.INTEGRATED:
                    # Evaluate the plot function once per shooting interval.
                    all_y = []
                    for idx, t in enumerate(self.t_integrated[i]):
                        y_tp = np.empty((self.variable_sizes[i][key], len(t)))
                        y_tp.fill(np.nan)
                        y_tp[:, :] = self.plot_func[key][i].function(
                            state[:, step_size * idx : step_size * (idx + 1)],
                            control[:, idx : idx + u_mod],
                            data_param_in_dyn,
                        )
                        all_y.append(y_tp)
                    # Regroup per component (one ydata entry per mapped index).
                    for idx in range(len(self.plot_func[key][i].phase_mappings.map_idx)):
                        y_tp = []
                        for y in all_y:
                            y_tp.append(y[idx, :])
                        self.__append_to_ydata([y_tp])
                else:
                    y = np.empty((self.variable_sizes[i][key], len(self.t[i])))
                    y.fill(np.nan)
                    try:
                        y[:, :] = self.plot_func[key][i].function(state[:, ::step_size], control, data_param_in_dyn)
                    except ValueError:
                        raise ValueError(
                            f"Wrong dimensions for plot {key}. Got "
                            f"{self.plot_func[key][i].function(state[:, ::step_size], control, data_param_in_dyn).shape}"
                            f", but expected {y.shape}"
                        )
                    self.__append_to_ydata(y)
        self.__update_axes()

    def __update_xdata(self):
        """Update of the time in plots (independent axis)."""
        self.__update_time_vector()
        for plot in self.plots:
            phase_idx = plot[1]
            if plot[0] == PlotType.INTEGRATED:
                for cmp, p in enumerate(plot[2]):
                    p.set_xdata(self.t_integrated[phase_idx][cmp])
                ax = plot[2][-1].axes
            else:
                plot[2].set_xdata(self.t[phase_idx])
                ax = plot[2].axes
            ax.set_xlim(0, self.t[-1][-1])
        if self.plots_bounds:
            for plot_bounds in self.plots_bounds:
                plot_bounds[0][0].set_xdata(self.t[plot_bounds[1]])
                ax = plot_bounds[0][0].axes
                ax.set_xlim(0, self.t[-1][-1])
        # Move every phase-transition line to the recomputed intersection times.
        intersections_time = self.find_phases_intersections()
        n = len(intersections_time)
        if n > 0:
            for p in range(int(len(self.plots_vertical_lines) / n)):
                for i, time in enumerate(intersections_time):
                    self.plots_vertical_lines[p * n + i].set_xdata([time, time])

    def __append_to_ydata(self, data):
        # Flatten one level: each element becomes its own ydata entry.
        for y in data:
            self.ydata.append(y)

    def __update_axes(self):
        """Updates axes ranges from the freshly computed ydata."""
        assert len(self.plots) == len(self.ydata)
        for i, plot in enumerate(self.plots):
            y = self.ydata[i]
            if plot[0] == PlotType.INTEGRATED:
                for cmp, p in enumerate(plot[2]):
                    p.set_ydata(y[cmp])
            else:
                plot[2].set_ydata(y)
        # Hide the vertical lines while autoscaling so they don't skew the range.
        for p in self.plots_vertical_lines:
            p.set_ydata((np.nan, np.nan))
        for key in self.axes:
            if (not self.adapt_graph_size_to_bounds) or (self.axes[key][0].bounds is None):
                for i, ax in enumerate(self.axes[key][1]):
                    if not self.axes[key][0].ylim:
                        y_max = -np.inf
                        y_min = np.inf
                        for p in ax.get_children():
                            if isinstance(p, lines.Line2D):
                                y_min = min(y_min, np.min(p.get_ydata()))
                                y_max = max(y_max, np.max(p.get_ydata()))
                        y_range, data_range = self.__compute_ylim(y_min, y_max, 1.25)
                        ax.set_ylim(y_range)
                        ax.set_yticks(
                            np.arange(
                                y_range[0],
                                y_range[1],
                                step=data_range / 4,
                            )
                        )
        # Restore the vertical lines to span the whole axis.
        for p in self.plots_vertical_lines:
            p.set_ydata((0, 1))

    @staticmethod
    def __compute_ylim(min_val, max_val, threshold):
        """Return (y_range, data_range) padding [min_val, max_val] by `threshold`."""
        # Guard against NaN/inf from all-NaN data (e.g. before the first update).
        if np.isnan(min_val) or np.isinf(min_val):
            min_val = 0
        if np.isnan(max_val) or np.isinf(max_val):
            max_val = 1
        data_mean = np.mean((min_val, max_val))
        data_range = max_val - min_val
        # Enforce a minimum visible range so flat data does not collapse the axis.
        if np.abs(data_range) < 0.8:
            data_range = 0.8
        y_range = (threshold * data_range) / 2
        y_range = data_mean - y_range, data_mean + y_range
        return y_range, data_range

    @staticmethod
    def _generate_windows_size(nb):
        """
        Defines the number of column and rows of subplots in function of the number of variables to plot.
        :param nb: Number of variables to plot. (integer)
        :return: (nb_cols, nb_rows) of the near-square subplot grid. (integers)
        """
        nb_rows = int(round(np.sqrt(nb)))
        return nb_rows + 1 if nb_rows * nb_rows < nb else nb_rows, nb_rows
class ShowResult:
    """Convenience wrapper to display an OCP solution as graphs or an animation."""

    def __init__(self, ocp, sol):
        """
        :param ocp: The solved optimal control program.
        :param sol: Solver output; sol["x"] holds the optimized vector.
        """
        self.ocp = ocp
        self.sol = sol

    def graphs(self, automatically_organize=True, adapt_graph_size_to_bounds=False, show_now=True):
        """Plot the solution with matplotlib (see PlotOcp for the parameters)."""
        plot_ocp = PlotOcp(
            self.ocp,
            automatically_organize=automatically_organize,
            adapt_graph_size_to_bounds=adapt_graph_size_to_bounds,
        )
        plot_ocp.update_data(self.sol["x"])
        if show_now:
            plt.show()

    def animate(self, nb_frames=80, show_now=True, **kwargs):
        """
        Animate solution with bioviz
        :param nb_frames: Number of frames in the animation. (integer)
        :param show_now: If updates must be automatically done (True) or not (False)
        :return: The list of bioviz.Viz when show_now is False, otherwise None.
        """
        try:
            import bioviz
        except ModuleNotFoundError:
            # Fix: grammar of the user-facing message ("be install" -> "be installed").
            raise RuntimeError("bioviz must be installed to animate the model")
        check_version(bioviz, "2.0.1", "2.1.0")
        data_interpolate, data_control = Data.get_data(
            self.ocp, self.sol["x"], integrate=False, interpolate_nb_frames=nb_frames
        )
        if not isinstance(data_interpolate["q"], (list, tuple)):
            data_interpolate["q"] = [data_interpolate["q"]]
        all_bioviz = []
        for idx_phase, data in enumerate(data_interpolate["q"]):
            all_bioviz.append(bioviz.Viz(loaded_model=self.ocp.nlp[idx_phase].model, **kwargs))
            all_bioviz[-1].load_movement(self.ocp.nlp[idx_phase].mapping["q"].expand.map(data))
        if show_now:
            # Poll each viewer until every window has been closed.
            b_is_visible = [True] * len(all_bioviz)
            while sum(b_is_visible):
                for i, b in enumerate(all_bioviz):
                    if b.vtk_window.is_active:
                        b.update()
                    else:
                        b_is_visible[i] = False
        else:
            return all_bioviz

    @staticmethod
    def keep_matplotlib():
        """Keep a tiny off-screen figure alive so matplotlib windows persist."""
        plt.figure(figsize=(0.01, 0.01)).canvas.manager.window.move(1000, 100)
        plt.show()
class OnlineCallback(Callback):
    """casadi Callback streaming intermediate solutions to a plotter process.

    Spawns a daemon process running PlotOcp and forwards each solver iterate
    to it through a multiprocessing queue.
    """

    def __init__(self, ocp, opts=None):
        """
        :param ocp: The optimal control program being solved.
        :param opts: Optional dict of casadi Callback options.
        """
        Callback.__init__(self)
        self.nlp = ocp
        self.nx = ocp.V.rows()
        self.ng = 0
        # Fix: avoid a mutable default argument ({}) shared across instances.
        self.construct("AnimateCallback", opts if opts is not None else {})
        self.queue = mp.Queue()
        self.plotter = self.ProcessPlotter(ocp)
        self.plot_process = mp.Process(target=self.plotter, args=(self.queue,), daemon=True)
        self.plot_process.start()

    @staticmethod
    def get_n_in():
        """Number of solver outputs fed into the callback."""
        return nlpsol_n_out()

    @staticmethod
    def get_n_out():
        """Single (dummy) output."""
        return 1

    @staticmethod
    def get_name_in(i):
        return nlpsol_out(i)

    @staticmethod
    def get_name_out(_):
        return "ret"

    def get_sparsity_in(self, i):
        """Declare the shape of each solver output we receive."""
        n = nlpsol_out(i)
        if n == "f":
            return Sparsity.scalar()
        elif n in ("x", "lam_x"):
            return Sparsity.dense(self.nx)
        elif n in ("g", "lam_g"):
            return Sparsity.dense(self.ng)
        else:
            return Sparsity(0, 0)

    def eval(self, arg):
        """Called by the solver at each iteration; ships V to the plotter."""
        send = self.queue.put
        send(arg[0])
        return [0]

    class ProcessPlotter(object):
        """Runs in the child process: polls the queue and redraws the figures."""

        def __init__(self, ocp):
            self.ocp = ocp

        def __call__(self, pipe):
            self.pipe = pipe
            self.plot = PlotOcp(self.ocp)
            # Poll the queue every 10 ms from the matplotlib event loop.
            timer = self.plot.all_figures[0].canvas.new_timer(interval=10)
            timer.add_callback(self.callback)
            timer.start()
            plt.show()

        def callback(self):
            # Drain the queue, keeping only work for the latest iterates.
            while not self.pipe.empty():
                V = self.pipe.get()
                self.plot.update_data(V)
                Iterations.save(V)
            for i, fig in enumerate(self.plot.all_figures):
                fig.canvas.draw()
            return True
class Iterations:
    """Persists intermediate solver iterates to a temporary pickle file."""

    @staticmethod
    def save(V):
        """Append V (as a numpy array) to the on-disk iteration history.

        :param V: Solver vector for the current iteration.
        """
        file_path = ".__tmp_bioptim/temp_save_iter.bobo"
        # Fix: the directory may not exist yet, and previews_iterations was
        # previously undefined (NameError) when the file did not exist.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        previews_iterations = []
        if os.path.isfile(file_path):
            with open(file_path, "rb") as file:
                previews_iterations = pickle.load(file)
        previews_iterations.append(np.array(V))
        with open(file_path, "wb") as file:
            pickle.dump(previews_iterations, file)
|
utils.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Miscellaneous helper functions."""
import copy
import queue
import threading
from typing import Dict, List, TypeVar, Callable, Any
K = TypeVar('K')
V = TypeVar('V')
def coerce_bool(value) -> bool:
    """Coerce an arbitrary value to a bool.

    Containers and numbers use Python truthiness; None is False; any other
    value is stringified, and '', '0' and 'false' (case-insensitive) are False.
    """
    if isinstance(value, (bool, int, float, list, dict)):
        return bool(value)
    if value is None:
        return False
    return str(value).lower() not in ['', '0', 'false']
def find_keys(d: Dict[K, V], predicate: Callable[[V], bool]) -> List[K]:
    """Return the keys of *d* whose values satisfy *predicate*."""
    return [key for key, value in d.items() if predicate(value)]
def find_spec_keys(d: Dict[K, Any], types) -> List[K]:
    """Find keys where values match one or more types."""
    return [key for key, value in d.items() if isinstance(value, types)]
def filter_by_keys(d: Dict[K, V], predicate: Callable[[K], bool]) -> Dict[K, V]:
    """Return the subset of *d* whose keys satisfy *predicate*."""
    return {key: val for key, val in d.items() if predicate(key)}
def copy_and_update(d: Dict[K, Any], patch: Dict[K, Any]) -> Dict[K, Any]:
    """Shallow-copy *d*, overlay *patch* on the copy, and return it."""
    updated = copy.copy(d)
    updated.update(patch)
    return updated
def remap_dict(d: Dict[K, V], keymap: Dict[K, K]) -> Dict[K, V]:
    """Return a (shallow) copy of d with some fields renamed.

    Keys which are not in keymap are left alone.

    Args:
      d: dict to rename
      keymap: map of old key -> new key

    Returns:
      new dict with fields renamed
    """
    renamed = {}
    for key, value in d.items():
        renamed[keymap.get(key, key)] = value
    return renamed
def _extract_batch_length(preds):
    """Extracts batch length of predictions.

    Args:
      preds: Dict[str, np.ndarray]; all arrays must share the first dimension.

    Returns:
      The common first-dimension length (None for an empty dict).

    Raises:
      ValueError: if any entry's batch length differs from the others.
    """
    batch_length = None
    for key, value in preds.items():
        # Fix: test `is None` instead of `batch_length or value.shape[0]`,
        # which treated a legitimate batch length of 0 as "unset" and could
        # mask mismatched batch sizes.
        if batch_length is None:
            batch_length = value.shape[0]
        elif value.shape[0] != batch_length:
            raise ValueError('Batch length of predictions should be same. %s has '
                             'different batch length than others.' % key)
    return batch_length
def unbatch_preds(preds):
    """Unbatch predictions, as in estimator.predict().

    Args:
      preds: Dict[str, np.ndarray], where all arrays have the same first
        dimension; any non-dict iterable is yielded through unchanged.

    Yields:
      sequence of Dict[str, np.ndarray], with the same keys as preds.
    """
    if isinstance(preds, dict):
        for i in range(_extract_batch_length(preds)):
            yield {key: value[i] for key, value in preds.items()}
    else:
        yield from preds
class TaskQueue(queue.Queue):
    """A simple task queue for processing jobs in a thread pool."""

    def __init__(self, num_workers=1):
        # TODO(lit-dev): Could use QueueHandler and QueueListener for this.
        super().__init__()
        self.num_workers = num_workers
        self.start_workers()

    def add_task(self, task, *args, **kwargs):
        """Enqueue task(*args, **kwargs) for execution by a worker thread."""
        # args/kwargs are always a tuple/dict here; no need for `or ()` guards.
        self.put((task, args, kwargs))

    def start_workers(self):
        """Spawn num_workers daemon threads that consume this queue forever."""
        for _ in range(self.num_workers):
            t = threading.Thread(target=self.worker, daemon=True)
            t.start()

    def worker(self):
        while True:
            item, args, kwargs = self.get()
            try:
                item(*args, **kwargs)
            finally:
                # Fix: always mark the task done, even if it raised; otherwise
                # a failing task leaves the count unbalanced and join() hangs.
                self.task_done()
|
mpv.py | # -*- coding: utf-8 -*-
# vim: ts=4 sw=4 et
#
# Python MPV library module
# Copyright (C) 2017-2020 Sebastian Götte <code@jaseg.net>
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
from ctypes import *
import ctypes.util
import threading
import os
import sys
from warnings import warn
from functools import partial, wraps
from contextlib import contextmanager
import collections
import re
import traceback
# Locate and load the libmpv shared library in a platform-specific way.
if os.name == 'nt':
    dll = ctypes.util.find_library('mpv-1.dll')
    if dll is None:
        raise OSError('Cannot find mpv-1.dll in your system %PATH%. One way to deal with this is to ship mpv-1.dll '
                      'with your script and put the directory your script is in into %PATH% before "import mpv": '
                      'os.environ["PATH"] = os.path.dirname(__file__) + os.pathsep + os.environ["PATH"] '
                      'If mpv-1.dll is located elsewhere, you can add that path to os.environ["PATH"].')
    backend = CDLL(dll)
    fs_enc = 'utf-8'
else:
    import locale
    lc, enc = locale.getlocale(locale.LC_NUMERIC)
    # libmpv requires LC_NUMERIC to be set to "C". Since messing with global variables everyone else relies upon is
    # still better than segfaulting, we are setting LC_NUMERIC to "C".
    locale.setlocale(locale.LC_NUMERIC, 'C')
    sofile = ctypes.util.find_library('mpv')
    if sofile is None:
        raise OSError("Cannot find libmpv in the usual places. Depending on your distro, you may try installing an "
                      "mpv-devel or mpv-libs package. If you have libmpv around but this script can't find it, consult "
                      "the documentation for ctypes.util.find_library which this script uses to look up the library "
                      "filename.")
    backend = CDLL(sofile)
    # Filenames passed to libmpv are encoded with the platform's fs encoding.
    fs_enc = sys.getfilesystemencoding()
class ShutdownError(SystemError):
    """Raised when an operation is attempted on a handle that is shutting down."""
    pass

class MpvHandle(c_void_p):
    """Opaque pointer to an mpv client handle (mpv_handle*)."""
    pass

class MpvRenderCtxHandle(c_void_p):
    """Opaque pointer to an mpv render context (mpv_render_context*)."""
    pass

class MpvOpenGLCbContext(c_void_p):
    """Opaque pointer to the (deprecated) opengl-cb context."""
    pass

class PropertyUnavailableError(AttributeError):
    """Raised when an mpv property exists but currently has no value."""
    pass
class ErrorCode(object):
    """For documentation on these, see mpv's libmpv/client.h."""
    SUCCESS = 0
    EVENT_QUEUE_FULL = -1
    NOMEM = -2
    UNINITIALIZED = -3
    INVALID_PARAMETER = -4
    OPTION_NOT_FOUND = -5
    OPTION_FORMAT = -6
    OPTION_ERROR = -7
    PROPERTY_NOT_FOUND = -8
    PROPERTY_FORMAT = -9
    PROPERTY_UNAVAILABLE = -10
    PROPERTY_ERROR = -11
    COMMAND = -12
    LOADING_FAILED = -13
    AO_INIT_FAILED = -14
    VO_INIT_FAILED = -15
    NOTHING_TO_PLAY = -16
    UNKNOWN_FORMAT = -17
    UNSUPPORTED = -18
    NOT_IMPLEMENTED = -19
    GENERIC = -20

    # Maps a libmpv error code to a factory building a fitting Python exception.
    # NOTE(review): -13 (LOADING_FAILED) has no entry and therefore falls
    # through to default_error_handler -- confirm this is intentional.
    EXCEPTION_DICT = {
        0: None,
        -1: lambda *a: MemoryError('mpv event queue full', *a),
        -2: lambda *a: MemoryError('mpv cannot allocate memory', *a),
        -3: lambda *a: ValueError('Uninitialized mpv handle used', *a),
        -4: lambda *a: ValueError('Invalid value for mpv parameter', *a),
        -5: lambda *a: AttributeError('mpv option does not exist', *a),
        -6: lambda *a: TypeError('Tried to set mpv option using wrong format', *a),
        -7: lambda *a: ValueError('Invalid value for mpv option', *a),
        -8: lambda *a: AttributeError('mpv property does not exist', *a),
        # Currently (mpv 0.18.1) there is a bug causing a PROPERTY_FORMAT error to be returned instead of
        # INVALID_PARAMETER when setting a property-mapped option to an invalid value.
        -9: lambda *a: TypeError('Tried to get/set mpv property using wrong format, or passed invalid value', *a),
        -10: lambda *a: PropertyUnavailableError('mpv property is not available', *a),
        -11: lambda *a: RuntimeError('Generic error getting or setting mpv property', *a),
        -12: lambda *a: SystemError('Error running mpv command', *a),
        -14: lambda *a: RuntimeError('Initializing the audio output failed', *a),
        -15: lambda *a: RuntimeError('Initializing the video output failed'),
        -16: lambda *a: RuntimeError('There was no audio or video data to play. This also happens if the file '
                                     'was recognized, but did not contain any audio or video streams, or no '
                                     'streams were selected.'),
        -17: lambda *a: RuntimeError('When trying to load the file, the file format could not be determined, '
                                     'or the file was too broken to open it'),
        -18: lambda *a: ValueError('Generic error for signaling that certain system requirements are not fulfilled'),
        -19: lambda *a: NotImplementedError('The API function which was called is a stub only'),
        -20: lambda *a: RuntimeError('Unspecified error')}

    @staticmethod
    def default_error_handler(ec, *args):
        # Fallback for codes without a dedicated entry; _mpv_error_string is
        # the libmpv binding defined elsewhere in this file.
        return ValueError(_mpv_error_string(ec).decode('utf-8'), ec, *args)

    @classmethod
    def raise_for_ec(kls, ec, func, *args):
        """Raise the exception mapped to *ec*, or nothing for success codes."""
        # Positive return values signal success; clamp them to 0 (no exception).
        ec = 0 if ec > 0 else ec
        ex = kls.EXCEPTION_DICT.get(ec, kls.default_error_handler)
        if ex:
            raise ex(ec, *args)
# C callback type: void *get_proc_address(void *ctx, const char *name)
MpvGlGetProcAddressFn = CFUNCTYPE(c_void_p, c_void_p, c_char_p)
class MpvOpenGLInitParams(Structure):
    """ctypes mapping of mpv_opengl_init_params (render API initialization)."""
    _fields_ = [('get_proc_address', MpvGlGetProcAddressFn),
                ('get_proc_address_ctx', c_void_p),
                ('extra_exts', c_void_p)]

    def __init__(self, get_proc_address):
        # Only the proc-address callback is used; ctx and extra extensions
        # are left unset by this binding.
        self.get_proc_address = get_proc_address
        self.get_proc_address_ctx = None
        self.extra_exts = None
class MpvOpenGLFBO(Structure):
    """ctypes mapping of mpv_opengl_fbo (render target description)."""
    _fields_ = [('fbo', c_int),
                ('w', c_int),
                ('h', c_int),
                ('internal_format', c_int)]

    def __init__(self, w, h, fbo=0, internal_format=0):
        """Width/height are mandatory; fbo 0 means the default framebuffer."""
        self.w = w
        self.h = h
        self.fbo = fbo
        self.internal_format = internal_format
class MpvRenderFrameInfo(Structure):
    """ctypes mapping of mpv_render_frame_info."""
    _fields_ = [('flags', c_int64),
                ('target_time', c_int64)]

    def as_dict(self):
        """Expose the frame info fields as a plain dict."""
        return dict(flags=self.flags, target_time=self.target_time)
class MpvOpenGLDRMParams(Structure):
    """ctypes mapping of the (v1) mpv_opengl_drm_params."""
    _fields_ = [('fd', c_int),
                ('crtc_id', c_int),
                ('connector_id', c_int),
                ('atomic_request_ptr', c_void_p),
                ('render_fd', c_int)]
class MpvOpenGLDRMDrawSurfaceSize(Structure):
    """ctypes mapping of mpv_opengl_drm_draw_surface_size."""
    _fields_ = [('width', c_int), ('height', c_int)]
class MpvOpenGLDRMParamsV2(Structure):
    """ctypes mapping of mpv_opengl_drm_params_v2."""
    _fields_ = [('fd', c_int),
                ('crtc_id', c_int),
                ('connector_id', c_int),
                ('atomic_request_ptr', c_void_p),
                ('render_fd', c_int)]

    def __init__(self, crtc_id, connector_id, atomic_request_ptr, fd=-1, render_fd=-1):
        """File descriptors default to -1, i.e. "not supplied"."""
        self.crtc_id = crtc_id
        self.connector_id = connector_id
        self.atomic_request_ptr = atomic_request_ptr
        self.fd = fd
        self.render_fd = render_fd
class MpvRenderParam(Structure):
    """ctypes mapping of mpv_render_param (a tagged type-id/pointer pair)."""
    _fields_ = [('type_id', c_int),
                ('data', c_void_p)]

    # maps human-readable type name to (type_id, argtype) tuple.
    # The type IDs come from libmpv/render.h
    TYPES = {"invalid": (0, None),
             "api_type": (1, str),
             "opengl_init_params": (2, MpvOpenGLInitParams),
             "opengl_fbo": (3, MpvOpenGLFBO),
             "flip_y": (4, bool),
             "depth": (5, int),
             "icc_profile": (6, bytes),
             "ambient_light": (7, int),
             "x11_display": (8, c_void_p),
             "wl_display": (9, c_void_p),
             "advanced_control": (10, bool),
             "next_frame_info": (11, MpvRenderFrameInfo),
             "block_for_target_time": (12, bool),
             "skip_rendering": (13, bool),
             "drm_display": (14, MpvOpenGLDRMParams),
             "drm_draw_surface_size": (15, MpvOpenGLDRMDrawSurfaceSize),
             "drm_display_v2": (16, MpvOpenGLDRMParamsV2)}

    def __init__(self, name, value=None):
        """Build a render param from its human-readable *name* and *value*.

        self.value keeps a Python-side reference to the marshalled object so
        the memory self.data points at is not garbage collected while this
        param is alive.

        :raises ValueError: if *name* is not a known render param type.
        """
        if name not in self.TYPES:
            raise ValueError('unknown render param type "{}"'.format(name))
        self.type_id, cons = self.TYPES[name]
        if cons is None:
            self.value = None
            self.data = c_void_p()
        elif cons is str:
            # Strings cross the C boundary as UTF-8.
            self.value = value
            self.data = cast(c_char_p(value.encode('utf-8')), c_void_p)
        elif cons is bytes:
            self.value = MpvByteArray(value)
            self.data = cast(pointer(self.value), c_void_p)
        elif cons is bool:
            # Booleans are marshalled as a pointer to int 0/1.
            self.value = c_int(int(bool(value)))
            self.data = cast(pointer(self.value), c_void_p)
        else:
            # Structure types: construct from a dict of field values.
            self.value = cons(**value)
            self.data = cast(pointer(self.value), c_void_p)
def kwargs_to_render_param_array(kwargs):
    """Flatten render-param kwargs into a C array terminated by an 'invalid' entry."""
    t = MpvRenderParam * (len(kwargs) + 1)
    # Each (name, value) tuple is used to construct a MpvRenderParam element;
    # the trailing ('invalid', None) is the end-of-list sentinel libmpv expects.
    return t(*kwargs.items(), ('invalid', None))
class MpvFormat(c_int):
    """Enumeration of libmpv data formats (mpv_format)."""
    NONE = 0
    STRING = 1
    OSD_STRING = 2
    FLAG = 3
    INT64 = 4
    DOUBLE = 5
    NODE = 6
    NODE_ARRAY = 7
    NODE_MAP = 8
    BYTE_ARRAY = 9

    def __eq__(self, other):
        # Equal when identical, when the raw value compares equal, or when the
        # other object coerces to the same integer.
        if self is other:
            return True
        if self.value == other:
            return True
        return self.value == int(other)

    def __repr__(self):
        names = ['NONE', 'STRING', 'OSD_STRING', 'FLAG', 'INT64', 'DOUBLE', 'NODE',
                 'NODE_ARRAY', 'NODE_MAP', 'BYTE_ARRAY']
        return names[self.value]

    def __hash__(self):
        return self.value
class MpvEventID(c_int):
    """Enumeration of libmpv event ids (mpv_event_id)."""
    NONE = 0
    SHUTDOWN = 1
    LOG_MESSAGE = 2
    GET_PROPERTY_REPLY = 3
    SET_PROPERTY_REPLY = 4
    COMMAND_REPLY = 5
    START_FILE = 6
    END_FILE = 7
    FILE_LOADED = 8
    TRACKS_CHANGED = 9
    TRACK_SWITCHED = 10
    IDLE = 11
    PAUSE = 12
    UNPAUSE = 13
    TICK = 14
    SCRIPT_INPUT_DISPATCH = 15
    CLIENT_MESSAGE = 16
    VIDEO_RECONFIG = 17
    AUDIO_RECONFIG = 18
    METADATA_UPDATE = 19
    SEEK = 20
    PLAYBACK_RESTART = 21
    PROPERTY_CHANGE = 22
    CHAPTER_CHANGE = 23
    # Every event id except NONE, e.g. for "subscribe to everything".
    ANY = (SHUTDOWN, LOG_MESSAGE, GET_PROPERTY_REPLY, SET_PROPERTY_REPLY, COMMAND_REPLY, START_FILE, END_FILE,
           FILE_LOADED, TRACKS_CHANGED, TRACK_SWITCHED, IDLE, PAUSE, UNPAUSE, TICK, SCRIPT_INPUT_DISPATCH,
           CLIENT_MESSAGE, VIDEO_RECONFIG, AUDIO_RECONFIG, METADATA_UPDATE, SEEK, PLAYBACK_RESTART, PROPERTY_CHANGE,
           CHAPTER_CHANGE)

    def __repr__(self):
        return ['NONE', 'SHUTDOWN', 'LOG_MESSAGE', 'GET_PROPERTY_REPLY', 'SET_PROPERTY_REPLY', 'COMMAND_REPLY',
                'START_FILE', 'END_FILE', 'FILE_LOADED', 'TRACKS_CHANGED', 'TRACK_SWITCHED', 'IDLE', 'PAUSE', 'UNPAUSE',
                'TICK', 'SCRIPT_INPUT_DISPATCH', 'CLIENT_MESSAGE', 'VIDEO_RECONFIG', 'AUDIO_RECONFIG',
                'METADATA_UPDATE', 'SEEK', 'PLAYBACK_RESTART', 'PROPERTY_CHANGE', 'CHAPTER_CHANGE'][self.value]

    @classmethod
    def from_str(kls, s):
        """Look up an event id from its mpv name, e.g. 'log-message'."""
        return getattr(kls, s.upper().replace('-', '_'))
# PEP 8 (E731): named functions instead of lambda assignments, so tracebacks and
# repr() show useful names for these property-decoder callables.
def identity_decoder(b):
    """Pass-through decoder: return the raw bytes unchanged."""
    return b

def strict_decoder(b):
    """Decode *b* as UTF-8, raising UnicodeDecodeError on invalid input."""
    return b.decode('utf-8')
def lazy_decoder(b):
    """Best-effort decoder: return *b* decoded as UTF-8 when possible, otherwise
    fall back to returning the raw bytes unchanged."""
    try:
        decoded = b.decode('utf-8')
    except UnicodeDecodeError:
        return b
    else:
        return decoded
class MpvNodeList(Structure):
    """ctypes mirror of mpv_node_list; used for both node arrays and node maps.
    ``_fields_`` is assigned further down in this file, after MpvNode exists,
    because the two structs reference each other."""
    def array_value(self, decoder=identity_decoder):
        """Unpack this list as a python list of node values."""
        return [self.values[i].node_value(decoder) for i in range(self.num)]
    def dict_value(self, decoder=identity_decoder):
        """Unpack this list as a python dict; keys are decoded strictly as UTF-8."""
        return {self.keys[i].decode('utf-8'):
                self.values[i].node_value(decoder) for i in range(self.num)}
class MpvByteArray(Structure):
    """ctypes mirror of mpv_byte_array: a (data pointer, size) pair."""
    _fields_ = [('data', c_void_p),
                ('size', c_size_t)]

    def __init__(self, value):
        # Keep a python-side reference so the backing bytes object stays alive
        # for as long as mpv may dereference the pointer.
        self._value = value
        self.size = len(value)
        self.data = cast(c_char_p(value), c_void_p)

    def bytes_value(self):
        """Copy the pointed-to buffer back out as a bytes object."""
        buf = cast(self.data, POINTER(c_char))
        return buf[:self.size]
class MpvNode(Structure):
    """ctypes mirror of mpv_node: a format tag plus a union holding the payload.
    ``_fields_`` is assigned below, after MpvNodeUnion is declared."""
    def node_value(self, decoder=identity_decoder):
        """Unpack this node into the corresponding python value."""
        return MpvNode.node_cast_value(self.val, self.format.value, decoder)
    @staticmethod
    def node_cast_value(v, fmt=MpvFormat.NODE, decoder=identity_decoder):
        """Convert union *v*, holding a value of mpv format *fmt*, into a python value.
        *decoder* controls how MPV_FORMAT_STRING payloads are turned into str/bytes;
        OSD strings are always decoded strictly as UTF-8.
        """
        if fmt == MpvFormat.NONE:
            return None
        elif fmt == MpvFormat.STRING:
            return decoder(v.string)
        elif fmt == MpvFormat.OSD_STRING:
            return v.string.decode('utf-8')
        elif fmt == MpvFormat.FLAG:
            return bool(v.flag)
        elif fmt == MpvFormat.INT64:
            return v.int64
        elif fmt == MpvFormat.DOUBLE:
            return v.double
        else:
            # All remaining formats are pointer-valued and recurse into sub-nodes.
            if not v.node: # Check for null pointer
                return None
            if fmt == MpvFormat.NODE:
                return v.node.contents.node_value(decoder)
            elif fmt == MpvFormat.NODE_ARRAY:
                return v.list.contents.array_value(decoder)
            elif fmt == MpvFormat.NODE_MAP:
                return v.map.contents.dict_value(decoder)
            elif fmt == MpvFormat.BYTE_ARRAY:
                return v.byte_array.contents.bytes_value()
            else:
                raise TypeError('Unknown MPV node format {}. Please submit a bug report.'.format(fmt))
class MpvNodeUnion(Union):
    """Payload union of MpvNode; which member is valid depends on the node's format tag."""
    _fields_ = [('string', c_char_p),       # STRING / OSD_STRING
                ('flag', c_int),            # FLAG
                ('int64', c_int64),         # INT64
                ('double', c_double),       # DOUBLE
                ('node', POINTER(MpvNode)),         # NODE
                ('list', POINTER(MpvNodeList)),     # NODE_ARRAY
                ('map', POINTER(MpvNodeList)),      # NODE_MAP
                ('byte_array', POINTER(MpvByteArray))]  # BYTE_ARRAY
# Resolve the mutual references: MpvNode, MpvNodeUnion and MpvNodeList point at
# each other, so their _fields_ can only be filled in once all classes exist.
MpvNode._fields_ = [('val', MpvNodeUnion),
                    ('format', MpvFormat)]
MpvNodeList._fields_ = [('num', c_int),
                        ('values', POINTER(MpvNode)),
                        ('keys', POINTER(c_char_p))]
class MpvSubApi(c_int):
    """Enum of libmpv sub-APIs, used with the mpv_get_sub_api call below."""
    MPV_SUB_API_OPENGL_CB = 1
class MpvEvent(Structure):
    """ctypes mirror of mpv_event, the generic header of every queued event."""
    _fields_ = [('event_id', MpvEventID),
                ('error', c_int),
                ('reply_userdata', c_ulonglong),
                ('data', c_void_p)]
    def as_dict(self, decoder=identity_decoder):
        """Copy this event out of ctypes-managed memory into a plain python dict.
        ``data`` is cast to the event-specific payload struct where one exists;
        events without a payload mapping get ``'event': None``.
        """
        dtype = {MpvEventID.END_FILE: MpvEventEndFile,
                MpvEventID.PROPERTY_CHANGE: MpvEventProperty,
                MpvEventID.GET_PROPERTY_REPLY: MpvEventProperty,
                MpvEventID.LOG_MESSAGE: MpvEventLogMessage,
                MpvEventID.SCRIPT_INPUT_DISPATCH: MpvEventScriptInputDispatch,
                MpvEventID.CLIENT_MESSAGE: MpvEventClientMessage
                }.get(self.event_id.value, None)
        return {'event_id': self.event_id.value,
                'error': self.error,
                'reply_userdata': self.reply_userdata,
                'event': cast(self.data, POINTER(dtype)).contents.as_dict(decoder=decoder) if dtype else None}
class MpvEventProperty(Structure):
    """ctypes mirror of mpv_event_property (PROPERTY_CHANGE / GET_PROPERTY_REPLY payload)."""
    _fields_ = [('name', c_char_p),
                ('format', MpvFormat),
                ('data', MpvNodeUnion)]
    def as_dict(self, decoder=identity_decoder):
        """Unpack into a dict; 'value' carries the property value decoded via *decoder*."""
        value = MpvNode.node_cast_value(self.data, self.format.value, decoder)
        return {'name': self.name.decode('utf-8'),
                'format': self.format,
                'data': self.data,
                'value': value}
class MpvEventLogMessage(Structure):
    """ctypes mirror of mpv_event_log_message (LOG_MESSAGE payload)."""
    _fields_ = [('prefix', c_char_p),
                ('level', c_char_p),
                ('text', c_char_p)]
    def as_dict(self, decoder=identity_decoder):
        # prefix/level are decoded strictly; only the free-form message text goes
        # through *decoder*, and its trailing whitespace/newline is stripped.
        return {'prefix': self.prefix.decode('utf-8'),
                'level': self.level.decode('utf-8'),
                'text': decoder(self.text).rstrip()}
class MpvEventEndFile(Structure):
    """ctypes mirror of mpv_event_end_file (END_FILE payload)."""
    _fields_ = [('reason', c_int),
                ('error', c_int)]
    # mpv_end_file_reason values:
    EOF = 0
    RESTARTED = 1
    ABORTED = 2
    QUIT = 3
    ERROR = 4
    REDIRECT = 5
    # For backwards-compatibility
    @property
    def value(self):
        # Older API users accessed the reason as .value; keep that alias working.
        return self.reason
    def as_dict(self, decoder=identity_decoder):
        return {'reason': self.reason, 'error': self.error}
class MpvEventScriptInputDispatch(Structure):
    """ctypes mirror of mpv_event_script_input_dispatch."""
    _fields_ = [('arg0', c_int),
                ('type', c_char_p)]
    def as_dict(self, decoder=identity_decoder):
        # Intentionally unimplemented: returns None for this event type.
        pass # TODO
class MpvEventClientMessage(Structure):
    """ctypes mirror of mpv_event_client_message (CLIENT_MESSAGE payload)."""
    _fields_ = [('num_args', c_int),
                ('args', POINTER(c_char_p))]
    def as_dict(self, decoder=identity_decoder):
        # Message args are decoded strictly as UTF-8; *decoder* is not consulted here.
        return {'args': [self.args[i].decode('utf-8') for i in range(self.num_args)]}
# C callback signatures for the custom stream (mpv_stream_cb) protocol.
StreamReadFn = CFUNCTYPE(c_int64, c_void_p, POINTER(c_char), c_uint64)  # (cookie, buf, nbytes) -> bytes read
StreamSeekFn = CFUNCTYPE(c_int64, c_void_p, c_int64)  # (cookie, offset) -> new position
StreamSizeFn = CFUNCTYPE(c_int64, c_void_p)  # (cookie) -> total size
StreamCloseFn = CFUNCTYPE(None, c_void_p)  # (cookie)
StreamCancelFn = CFUNCTYPE(None, c_void_p)  # (cookie)
class StreamCallbackInfo(Structure):
    """ctypes mirror of mpv_stream_cb_info, filled in by a stream-open callback."""
    _fields_ = [('cookie', c_void_p),
                ('read', StreamReadFn),
                ('seek', StreamSeekFn),
                ('size', StreamSizeFn),
                ('close', StreamCloseFn), ]
    # The 'cancel' member is deliberately left out of _fields_ (see next line).
#              ('cancel', StreamCancelFn)]
StreamOpenFn = CFUNCTYPE(c_int, c_void_p, c_char_p, POINTER(StreamCallbackInfo))  # (user_data, uri, info) -> error code
WakeupCallback = CFUNCTYPE(None, c_void_p)
RenderUpdateFn = CFUNCTYPE(None, c_void_p)
# Callback types for the deprecated opengl-cb API (registered with deprecated=True below).
OpenGlCbUpdateFn = CFUNCTYPE(None, c_void_p)
OpenGlCbGetProcAddrFn = CFUNCTYPE(c_void_p, c_void_p, c_char_p)
def _handle_func(name, args, restype, errcheck, ctx=MpvHandle, deprecated=False):
    """Look up C function *name* on the loaded libmpv ``backend`` and publish it
    as a module-level ``_<name>`` global with the given ctypes signature.
    args       -- list of argument ctypes; *ctx* is prepended when truthy
    restype    -- return ctype; None keeps ctypes' default
    errcheck   -- optional ctypes errcheck hook attached to the function
    deprecated -- when True, wrap the function to emit a DeprecationWarning once
    """
    func = getattr(backend, name)
    func.argtypes = [ctx] + args if ctx else args
    if restype is not None:
        func.restype = restype
    if errcheck is not None:
        func.errcheck = errcheck
    if deprecated:
        @wraps(func)
        def wrapper(*args, **kwargs):
            if not wrapper.warned: # Only warn on first invocation to prevent spamming
                warn("Backend C api has been deprecated: " + name, DeprecationWarning, stacklevel=2)
                wrapper.warned = True
            return func(*args, **kwargs)
        wrapper.warned = False
        globals()['_' + name] = wrapper
    else:
        globals()['_' + name] = func
def bytes_free_errcheck(res, func, *args):
    """ctypes errcheck for calls returning an mpv-allocated buffer: validates the
    result is non-NULL, extracts ``.value`` from the returned pointer, then
    releases the mpv-side allocation via mpv_free before returning."""
    notnull_errcheck(res, func, *args)
    rv = cast(res, c_void_p).value
    _mpv_free(res)
    return rv
def notnull_errcheck(res, func, *args):
    """ctypes errcheck hook: raise RuntimeError when a libmpv call returned NULL,
    otherwise pass the result through unchanged.
    Bugfix: the two implicitly-concatenated message strings were missing the
    separating space ("returned.Please consult ...").
    """
    if res is None:
        raise RuntimeError('Underspecified error in MPV when calling {} with args {!r}: NULL pointer returned. '
                'Please consult your local debugger.'.format(func.__name__, args))
    return res
# Shared errcheck for int-returning libmpv calls; ErrorCode is defined earlier in
# this file and presumably raises when the result is an mpv error code.
ec_errcheck = ErrorCode.raise_for_ec
def _handle_gl_func(name, args=None, restype=None, deprecated=False):
    """Register a deprecated opengl-cb libmpv function via _handle_func, using the
    opengl-cb context handle.
    Bugfix: the default was the shared mutable ``args=[]``; use None and create a
    fresh list per call instead.
    """
    _handle_func(name, [] if args is None else args, restype, errcheck=None, ctx=MpvOpenGLCbContext, deprecated=deprecated)
backend.mpv_client_api_version.restype = c_ulong
def _mpv_client_api_version():
    """Return the libmpv client API version as a (major, minor) tuple."""
    ver = backend.mpv_client_api_version()
    return ver >> 16, ver & 0xFFFF
# These three are configured by hand because _handle_func would prepend an
# MpvHandle context argument that they do not take.
backend.mpv_free.argtypes = [c_void_p]
_mpv_free = backend.mpv_free
backend.mpv_free_node_contents.argtypes = [c_void_p]
_mpv_free_node_contents = backend.mpv_free_node_contents
backend.mpv_create.restype = MpvHandle
_mpv_create = backend.mpv_create
# Register the libmpv C entry points as module-level _mpv_* wrappers.
_handle_func('mpv_create_client', [c_char_p], MpvHandle, notnull_errcheck)
_handle_func('mpv_client_name', [], c_char_p, errcheck=None)
_handle_func('mpv_initialize', [], c_int, ec_errcheck)
_handle_func('mpv_detach_destroy', [], None, errcheck=None)
_handle_func('mpv_terminate_destroy', [], None, errcheck=None)
_handle_func('mpv_load_config_file', [c_char_p], c_int, ec_errcheck)
_handle_func('mpv_get_time_us', [], c_ulonglong, errcheck=None)
_handle_func('mpv_set_option', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_set_option_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_command', [POINTER(c_char_p)], c_int, ec_errcheck)
_handle_func('mpv_command_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_command_async', [c_ulonglong, POINTER(c_char_p)], c_int, ec_errcheck)
_handle_func('mpv_command_node', [POINTER(MpvNode), POINTER(MpvNode)], c_int, ec_errcheck)
# NOTE(review): this re-registers 'mpv_command_async' and silently overwrites the
# wrapper created two lines above; the node variant is probably meant to be
# 'mpv_command_node_async' -- verify against the installed client.h.
_handle_func('mpv_command_async', [c_ulonglong, POINTER(MpvNode)], c_int, ec_errcheck)
_handle_func('mpv_set_property', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_set_property_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_set_property_async', [c_ulonglong, c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_get_property', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_get_property_string', [c_char_p], c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_osd_string', [c_char_p], c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_async', [c_ulonglong, c_char_p, MpvFormat], c_int, ec_errcheck)
_handle_func('mpv_observe_property', [c_ulonglong, c_char_p, MpvFormat], c_int, ec_errcheck)
_handle_func('mpv_unobserve_property', [c_ulonglong], c_int, ec_errcheck)
# These two are context-free (ctx=None): they take no mpv handle.
_handle_func('mpv_event_name', [c_int], c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_error_string', [c_int], c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_request_event', [MpvEventID, c_int], c_int, ec_errcheck)
_handle_func('mpv_request_log_messages', [c_char_p], c_int, ec_errcheck)
_handle_func('mpv_wait_event', [c_double], POINTER(MpvEvent), errcheck=None)
_handle_func('mpv_wakeup', [], None, errcheck=None)
_handle_func('mpv_set_wakeup_callback', [WakeupCallback, c_void_p], None, errcheck=None)
_handle_func('mpv_get_wakeup_pipe', [], c_int, errcheck=None)
_handle_func('mpv_stream_cb_add_ro', [c_char_p, c_void_p, StreamOpenFn], c_int, ec_errcheck)
# Render API: these operate on a render context handle instead of the client handle.
_handle_func('mpv_render_context_create', [MpvRenderCtxHandle, MpvHandle, POINTER(MpvRenderParam)], c_int, ec_errcheck,
             ctx=None)
_handle_func('mpv_render_context_set_parameter', [MpvRenderParam], c_int, ec_errcheck, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_get_info', [MpvRenderParam], c_int, ec_errcheck, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_set_update_callback', [RenderUpdateFn, c_void_p], None, errcheck=None,
             ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_update', [], c_int64, errcheck=None, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_render', [POINTER(MpvRenderParam)], c_int, ec_errcheck, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_report_swap', [], None, errcheck=None, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_free', [], None, errcheck=None, ctx=MpvRenderCtxHandle)
# Deprecated in v0.29.0 and may disappear eventually
if hasattr(backend, 'mpv_get_sub_api'):
    _handle_func('mpv_get_sub_api', [MpvSubApi], c_void_p, notnull_errcheck, deprecated=True)
    _handle_gl_func('mpv_opengl_cb_set_update_callback', [OpenGlCbUpdateFn, c_void_p], deprecated=True)
    _handle_gl_func('mpv_opengl_cb_init_gl', [c_char_p, OpenGlCbGetProcAddrFn, c_void_p], c_int, deprecated=True)
    _handle_gl_func('mpv_opengl_cb_draw', [c_int, c_int, c_int], c_int, deprecated=True)
    _handle_gl_func('mpv_opengl_cb_render', [c_int, c_int], c_int, deprecated=True)
    _handle_gl_func('mpv_opengl_cb_report_flip', [c_ulonglong], c_int, deprecated=True)
    _handle_gl_func('mpv_opengl_cb_uninit_gl', [], c_int, deprecated=True)
def _mpv_coax_proptype(value, proptype=str):
"""Intelligently coax the given python value into something that can be understood as a proptype property."""
if type(value) is bytes:
return value;
elif type(value) is bool:
return b'yes' if value else b'no'
elif proptype in (str, int, float):
return str(proptype(value)).encode('utf-8')
else:
raise TypeError('Cannot coax value of type {} into property type {}'.format(type(value), proptype))
def _make_node_str_list(l):
    """Take a list of python objects and make a MPV string node array from it.
    As an example, the python list ``l = [ "foo", 23, false ]`` will result in the following MPV node object::
        struct mpv_node {
            .format = MPV_NODE_ARRAY,
            .u.list = *(struct mpv_node_array){
                .num = len(l),
                .keys = NULL,
                .values = struct mpv_node[len(l)] {
                    { .format = MPV_NODE_STRING, .u.string = l[0] },
                    { .format = MPV_NODE_STRING, .u.string = l[1] },
                    ...
                }
            }
        }
    All four returned objects must be kept alive by the caller for as long as mpv
    may access the array, since the C structs point into each other.
    """
    # Coax every element to a UTF-8 C string (non-str/bytes go through str()).
    char_ps = [c_char_p(_mpv_coax_proptype(e, str)) for e in l]
    node_list = MpvNodeList(
        num=len(l),
        keys=None,
        values=(MpvNode * len(l))(*[MpvNode(
            format=MpvFormat.STRING,
            val=MpvNodeUnion(string=p))
            for p in char_ps]))
    node = MpvNode(
        format=MpvFormat.NODE_ARRAY,
        val=MpvNodeUnion(list=pointer(node_list)))
    # Returns (string refs, list struct, root node, void* to the root node).
    return char_ps, node_list, node, cast(pointer(node), c_void_p)
def _event_generator(handle):
    """Yield events from *handle*, blocking on mpv_wait_event, until the NONE
    event signals that the event queue is being shut down.
    Bugfix (PEP 479): raising StopIteration inside a generator is turned into a
    RuntimeError on python 3.7+; a plain ``return`` ends the generator correctly.
    """
    while True:
        event = _mpv_wait_event(handle, -1).contents
        if event.event_id.value == MpvEventID.NONE:
            return
        yield event
_py_to_mpv = lambda name: name.replace('_', '-')
_mpv_to_py = lambda name: name.replace('-', '_')
_drop_nones = lambda *args: [arg for arg in args if arg is not None]
class _Proxy:
    """Base for the MPV attribute/item proxies: stores the owning MPV instance.
    The assignment goes through super().__setattr__ so that subclasses overriding
    __setattr__ (e.g. _OSDPropertyProxy, which raises) are bypassed."""
    def __init__(self, mpv):
        super().__setattr__('mpv', mpv)
class _PropertyProxy(_Proxy):
    """Proxy mixin that adds mpv's property list (dash-to-underscore mangled) to
    dir(), so interactive tab completion shows the available properties."""
    def __dir__(self):
        return super().__dir__() + [name.replace('-', '_') for name in self.mpv.property_list]
class _FileLocalProxy(_Proxy):
    """Item-access proxy that forwards to the owning MPV with file_local=True,
    i.e. operates on per-file (file-local) options."""
    def __getitem__(self, name):
        return self.mpv.__getitem__(name, file_local=True)
    def __setitem__(self, name, value):
        return self.mpv.__setitem__(name, value, file_local=True)
    def __iter__(self):
        return iter(self.mpv)
class _OSDPropertyProxy(_PropertyProxy):
    """Read-only attribute proxy returning properties in their OSD string format."""
    def __getattr__(self, name):
        return self.mpv._get_property(_py_to_mpv(name), fmt=MpvFormat.OSD_STRING)
    def __setattr__(self, _name, _value):
        raise AttributeError('OSD properties are read-only. Please use the regular property API for writing.')
class _DecoderPropertyProxy(_PropertyProxy):
    """Attribute proxy that reads properties through a fixed bytes decoder
    (identity/strict/lazy); writes are forwarded to the regular property API."""
    def __init__(self, mpv, decoder):
        super().__init__(mpv)
        # Bypass our own __setattr__ (which forwards to mpv) for internal state.
        super().__setattr__('_decoder', decoder)
    def __getattr__(self, name):
        return self.mpv._get_property(_py_to_mpv(name), decoder=self._decoder)
    def __setattr__(self, name, value):
        setattr(self.mpv, _py_to_mpv(name), value)
class GeneratorStream:
    """Transform a python generator into an mpv-compatible stream object. This
    supports size(), read() and rewinding via seek(0); close() and cancel() just
    end the stream.
    Bugfix: the read state used to be created only in seek(), so calling read()
    before any seek() raised AttributeError; the state is now initialized lazily.
    """
    def __init__(self, generator_fun, size=None):
        self._generator_fun = generator_fun
        self.size = size
        # Deferred until first use so generator_fun is not invoked early.
        self._read_iter = None
        self._read_chunk = b''

    def seek(self, offset):
        """(Re)start the generator. Only seeking to the first byte is supported."""
        self._read_iter = iter(self._generator_fun())
        self._read_chunk = b''
        return 0  # We only support seeking to the first byte atm

    def read(self, size):
        """Return up to *size* bytes, or b'' at end of stream."""
        if self._read_iter is None:
            # First read without a prior seek(0): start the generator now.
            self._read_iter = iter(self._generator_fun())
        if not self._read_chunk:
            try:
                self._read_chunk += next(self._read_iter)
            except StopIteration:
                return b''
        rv, self._read_chunk = self._read_chunk[:size], self._read_chunk[size:]
        return rv

    def close(self):
        self._read_iter = iter([]) # make next read() call return EOF

    def cancel(self):
        self._read_iter = iter([]) # make next read() call return EOF
# TODO?
class ImageOverlay:
    """Manage a single mpv overlay fed from a PIL image via the overlay-add command."""
    def __init__(self, m, overlay_id, img=None, pos=(0, 0)):
        self.m = m
        self.overlay_id = overlay_id
        self.pos = pos
        self._size = None
        if img is not None:
            self.update(img)
    def update(self, img=None, pos=None):
        """Re-render the overlay; *img* and *pos* default to their last values."""
        from PIL import Image
        if img is not None:
            self.img = img
        img = self.img
        w, h = img.size
        stride = w * 4
        if pos is not None:
            self.pos = pos
        x, y = self.pos
        # Pre-multiply alpha channel
        bg = Image.new('RGBA', (w, h), (0, 0, 0, 0))
        out = Image.alpha_composite(bg, img)
        # Copy image to ctypes buffer
        # (the buffer is reused across updates while the image size is unchanged)
        if img.size != self._size:
            self._buf = create_string_buffer(w * h * 4)
            self._size = img.size
        ctypes.memmove(self._buf, out.tobytes('raw', 'BGRA'), w * h * 4)
        # overlay-add's '&<address>' source form reads pixels straight from our
        # process memory (see man mpv(1)); self._buf must stay alive meanwhile.
        source = '&' + str(addressof(self._buf))
        self.m.overlay_add(self.overlay_id, x, y, source, 0, 'bgra', w, h, stride)
    def remove(self):
        self.m.remove_overlay(self.overlay_id)
class FileOverlay:
    """Manage a single mpv overlay whose raw bgra pixel data is read from a file."""
    def __init__(self, m, overlay_id, filename=None, size=None, stride=None, pos=(0, 0)):
        self.m = m
        self.overlay_id = overlay_id
        self.pos = pos
        self.size = size
        self.stride = stride
        if filename is not None:
            self.update(filename)
    def update(self, filename=None, size=None, stride=None, pos=None):
        """Re-render the overlay; arguments left at None keep their previous value.
        ``stride`` defaults to 4 bytes per pixel times the width.
        """
        if filename is not None:
            self.filename = filename
        if pos is not None:
            self.pos = pos
        if size is not None:
            self.size = size
        if stride is not None:
            self.stride = stride
        x, y = self.pos
        w, h = self.size
        stride = self.stride or 4 * w
        # Bugfix: a stray ``self`` used to be passed as the first argument here
        # (compare ImageOverlay.update), shifting every overlay-add parameter by one.
        self.m.overlay_add(self.overlay_id, x, y, self.filename, 0, 'bgra', w, h, stride)
    def remove(self):
        self.m.remove_overlay(self.overlay_id)
class MPV(object):
"""See man mpv(1) for the details of the implemented commands. All mpv properties can be accessed as
``my_mpv.some_property`` and all mpv options can be accessed as ``my_mpv['some-option']``.
By default, properties are returned as decoded ``str`` and an error is thrown if the value does not contain valid
utf-8. To get a decoded ``str`` if possibly but ``bytes`` instead of an error if not, use
``my_mpv.lazy.some_property``. To always get raw ``bytes``, use ``my_mpv.raw.some_property``. To access a
property's decoded OSD value, use ``my_mpv.osd.some_property``.
To get API information on an option, use ``my_mpv.option_info('option-name')``. To get API information on a
property, use ``my_mpv.properties['property-name']``. Take care to use mpv's dashed-names instead of the
underscore_names exposed on the python object.
To make your program not barf hard the first time its used on a weird file system **always** access properties
containing file names or file tags through ``MPV.raw``. """
def __init__(self, *extra_mpv_flags, log_handler=None, start_event_thread=True, loglevel=None, **extra_mpv_opts):
"""Create an MPV instance.
Extra arguments and extra keyword arguments will be passed to mpv as options.
"""
self.handle = _mpv_create()
self._event_thread = None
self._core_shutdown = False
_mpv_set_option_string(self.handle, b'audio-display', b'no')
istr = lambda o: ('yes' if o else 'no') if type(o) is bool else str(o)
try:
for flag in extra_mpv_flags:
_mpv_set_option_string(self.handle, flag.encode('utf-8'), b'')
for k, v in extra_mpv_opts.items():
_mpv_set_option_string(self.handle, k.replace('_', '-').encode('utf-8'), istr(v).encode('utf-8'))
finally:
_mpv_initialize(self.handle)
self.osd = _OSDPropertyProxy(self)
self.file_local = _FileLocalProxy(self)
self.raw = _DecoderPropertyProxy(self, identity_decoder)
self.strict = _DecoderPropertyProxy(self, strict_decoder)
self.lazy = _DecoderPropertyProxy(self, lazy_decoder)
self._event_callbacks = []
self._event_handler_lock = threading.Lock()
self._property_handlers = collections.defaultdict(lambda: [])
self._quit_handlers = set()
self._message_handlers = {}
self._key_binding_handlers = {}
self._event_handle = _mpv_create_client(self.handle, b'py_event_handler')
self._log_handler = log_handler
self._stream_protocol_cbs = {}
self._stream_protocol_frontends = collections.defaultdict(lambda: {})
self._open_func = self.register_stream_protocol('python', self._python_stream_open)
self._python_streams = {}
self._python_stream_catchall = None
self.overlay_ids = set()
self.overlays = {}
if loglevel is not None or log_handler is not None:
self.set_loglevel(loglevel or 'terminal-default')
if start_event_thread:
self._event_thread = threading.Thread(target=self._loop, name='MPVEventHandlerThread')
self._event_thread.setDaemon(True)
self._event_thread.start()
else:
self._event_thread = None
    def _loop(self):
        """Event loop body; runs on the daemon thread, dispatching mpv events to
        the registered python callbacks until the core shuts down."""
        for event in _event_generator(self._event_handle):
            try:
                devent = event.as_dict(decoder=lazy_decoder) # copy data from ctypes
                eid = devent['event_id']
                # Set the shutdown flag under the lock before running callbacks.
                with self._event_handler_lock:
                    if eid == MpvEventID.SHUTDOWN:
                        self._core_shutdown = True
                # Generic event callbacks run for every event.
                for callback in self._event_callbacks:
                    callback(devent)
                if eid == MpvEventID.PROPERTY_CHANGE:
                    pc = devent['event']
                    name, value, _fmt = pc['name'], pc['value'], pc['format']
                    for handler in self._property_handlers[name]:
                        handler(name, value)
                if eid == MpvEventID.LOG_MESSAGE and self._log_handler is not None:
                    ev = devent['event']
                    self._log_handler(ev['level'], ev['prefix'], ev['text'])
                if eid == MpvEventID.CLIENT_MESSAGE:
                    # {'event': {'args': ['key-binding', 'foo', 'u-', 'g']}, 'reply_userdata': 0, 'error': 0, 'event_id': 16}
                    target, *args = devent['event']['args']
                    if target in self._message_handlers:
                        self._message_handlers[target](*args)
                if eid == MpvEventID.SHUTDOWN:
                    # Detach our dedicated event handle and end the thread.
                    _mpv_detach_destroy(self._event_handle)
                    return
            except Exception as e:
                # Never let a handler exception kill the event thread.
                print('Exception inside python-mpv event loop:', file=sys.stderr)
                traceback.print_exc()
    @property
    def core_shutdown(self) -> bool:
        """Property indicating whether the core has been shut down. Possible causes for this are e.g. the `quit` command
        or a user closing the mpv window."""
        return self._core_shutdown
    def check_core_alive(self):
        """ This method can be used as a sanity check to test whether the core is still alive at the time it is
        called. Raises ShutdownError if the core has shut down (see ``core_shutdown``)."""
        if self._core_shutdown:
            raise ShutdownError('libmpv core has been shutdown')
    def wait_until_paused(self):
        """Waits until playback of the current title is paused or done. Raises a ShutdownError if the core is shutdown while
        waiting."""
        # core-idle becomes truthy when playback is paused or nothing is playing.
        self.wait_for_property('core-idle')
    def wait_for_playback(self):
        """Waits until playback of the current title is finished. Raises a ShutdownError if the core is shutdown while
        waiting.
        """
        # Blocks until mpv emits the end_file event for the current title.
        self.wait_for_event('end_file')
    def wait_until_playing(self):
        """Waits until playback of the current title has started. Raises a ShutdownError if the core is shutdown while
        waiting."""
        # Waits for core-idle to become falsy, i.e. playback has actually started.
        self.wait_for_property('core-idle', lambda idle: not idle)
    def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
        """Waits until ``cond`` evaluates to a truthy value on the named property. This can be used to wait for
        properties such as ``idle_active`` indicating the player is done with regular playback and just idling around.
        Raises a ShutdownError when the core is shutdown while waiting.
        With ``level_sensitive`` (the default) a current value already satisfying ``cond`` returns immediately;
        otherwise only a future change to a satisfying value ends the wait.
        """
        with self.prepare_and_wait_for_property(name, cond, level_sensitive):
            pass
    def wait_for_shutdown(self):
        '''Wait for core to shutdown (e.g. through quit() or terminate()).'''
        sema = threading.Semaphore(value=0)
        # One-shot event handler releasing the semaphore on the shutdown event.
        @self.event_callback('shutdown')
        def shutdown_handler(event):
            sema.release()
        sema.acquire()
        shutdown_handler.unregister_mpv_events()
    @contextmanager
    def prepare_and_wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
        """Context manager that waits until ``cond`` evaluates to a truthy value on the named property. See
        prepare_and_wait_for_event for usage.
        Raises a ShutdownError when the core is shutdown while waiting.
        """
        sema = threading.Semaphore(value=0)
        def observer(name, val):
            if cond(val):
                sema.release()
        self.observe_property(name, observer)
        # Also wake up on shutdown so a dying core cannot deadlock the waiter.
        @self.event_callback('shutdown')
        def shutdown_handler(event):
            sema.release()
        yield
        # With level_sensitive, skip waiting when the property already satisfies cond.
        if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))):
            sema.acquire()
        # Distinguish "condition met" from "core shut down" after waking up.
        self.check_core_alive()
        shutdown_handler.unregister_mpv_events()
        self.unobserve_property(name, observer)
    def wait_for_event(self, *event_types, cond=lambda evt: True):
        """Waits for the indicated event(s). If cond is given, waits until cond(event) is true. Raises a ShutdownError
        if the core is shutdown while waiting. This also happens when 'shutdown' is in event_types.
        """
        # Delegates to the race-free context-manager form with an empty body.
        with self.prepare_and_wait_for_event(*event_types, cond=cond):
            pass
    @contextmanager
    def prepare_and_wait_for_event(self, *event_types, cond=lambda evt: True):
        """Context manager that waits for the indicated event(s) like wait_for_event after running. If cond is given,
        waits until cond(event) is true. Raises a ShutdownError if the core is shutdown while waiting. This also happens
        when 'shutdown' is in event_types.
        Compared to wait_for_event this handles the case where a thread waits for an event it itself causes in a
        thread-safe way. An example from the testsuite is:
            with self.m.prepare_and_wait_for_event('client_message'):
                self.m.keypress(key)
        Using just wait_for_event it would be impossible to ensure the event is caught since it may already have been
        handled in the interval between keypress(...) running and a subsequent wait_for_event(...) call.
        """
        sema = threading.Semaphore(value=0)
        # Also wake up on shutdown so a dying core cannot deadlock the waiter.
        @self.event_callback('shutdown')
        def shutdown_handler(event):
            sema.release()
        @self.event_callback(*event_types)
        def target_handler(evt):
            if cond(evt):
                sema.release()
        yield
        sema.acquire()
        # Distinguish "event arrived" from "core shut down" after waking up.
        self.check_core_alive()
        shutdown_handler.unregister_mpv_events()
        target_handler.unregister_mpv_events()
    def __del__(self):
        # Only terminate while we still own a live handle; terminate() clears it,
        # making repeated destruction a no-op.
        if self.handle:
            self.terminate()
    def terminate(self):
        """Properly terminates this player instance. Preferably use this instead of relying on python's garbage
        collector to cause this to be called from the object's destructor.
        This method will detach the main libmpv handle and wait for mpv to shut down and the event thread to finish.
        """
        # Clear self.handle first so __del__ / a second terminate() become no-ops.
        self.handle, handle = None, self.handle
        if threading.current_thread() is self._event_thread:
            # Joining our own thread below would deadlock, hence the bail-out.
            # NOTE(review): the message promises this call "has been transformed
            # into a call to quit()", but the raise makes the self.quit() line
            # below unreachable -- likely this was meant to be warn(); verify.
            raise UserWarning('terminate() should not be called from event thread (e.g. from a callback function). If '
                    'you want to terminate mpv from here, please call quit() instead, then sync the main thread '
                    'against the event thread using e.g. wait_for_shutdown(), then terminate() from the main thread. '
                    'This call has been transformed into a call to quit().')
            self.quit()
        else:
            _mpv_terminate_destroy(handle)
            if self._event_thread:
                self._event_thread.join()
    def set_loglevel(self, level):
        """Set MPV's log level. This adjusts which output will be sent to this object's log handlers. If you just want
        mpv's regular terminal output, you don't need to adjust this but just need to pass a log handler to the MPV
        constructor such as ``MPV(log_handler=print)``.
        Valid log levels are "no", "fatal", "error", "warn", "info", "v" "debug" and "trace". For details see your mpv's
        client.h header file.
        """
        _mpv_request_log_messages(self._event_handle, level.encode('utf-8'))
def command(self, name, *args):
"""Execute a raw command."""
args = [name.encode('utf-8')] + [(arg if type(arg) is bytes else str(arg).encode('utf-8'))
for arg in args if arg is not None] + [None]
_mpv_command(self.handle, (c_char_p * len(args))(*args))
def node_command(self, name, *args, decoder=strict_decoder):
_1, _2, _3, pointer = _make_node_str_list([name, *args])
out = cast(create_string_buffer(sizeof(MpvNode)), POINTER(MpvNode))
ppointer = cast(pointer, POINTER(MpvNode))
_mpv_command_node(self.handle, ppointer, out)
rv = out.contents.node_value(decoder=decoder)
_mpv_free_node_contents(out)
return rv
    def seek(self, amount, reference="relative", precision="default-precise"):
        """Mapped mpv seek command, see man mpv(1).
        amount    -- how far to seek (interpretation depends on *reference*)
        reference -- e.g. 'relative', 'absolute', 'absolute-percent'
        precision -- e.g. 'default-precise', 'keyframes', 'exact'
        """
        self.command('seek', amount, reference, precision)
def revert_seek(self):
"""Mapped mpv revert_seek command, see man mpv(1)."""
self.command('revert_seek');
    def frame_step(self):
        """Mapped mpv frame-step command, see man mpv(1)."""
        # Note the dashed spelling here vs. 'frame_back_step' directly below.
        self.command('frame-step')
    def frame_back_step(self):
        """Mapped mpv frame_back_step command, see man mpv(1)."""
        # Underscore spelling, unlike 'frame-step' above; mpv accepts either separator.
        self.command('frame_back_step')
    def property_add(self, name, value=1):
        """Add the given value to the property's value. On overflow or underflow, clamp the property to the maximum. If
        ``value`` is omitted, assume ``1``.
        """
        # Maps to the mpv 'add' input command.
        self.command('add', name, value)
    def property_multiply(self, name, factor):
        """Multiply the value of a property with a numeric factor."""
        # Maps to the mpv 'multiply' input command.
        self.command('multiply', name, factor)
    def cycle(self, name, direction='up'):
        """Cycle the given property. ``up`` and ``down`` set the cycle direction. On overflow, set the property back to
        the minimum, on underflow set it to the maximum. If ``up`` or ``down`` is omitted, assume ``up``.
        """
        # Maps to the mpv 'cycle' input command.
        self.command('cycle', name, direction)
    def screenshot(self, includes='subtitles', mode='single'):
        """Mapped mpv screenshot command, see man mpv(1).
        includes -- e.g. 'subtitles', 'video', 'window'
        mode     -- e.g. 'single', 'each-frame'
        """
        self.command('screenshot', includes, mode)
    def screenshot_to_file(self, filename, includes='subtitles'):
        """Mapped mpv screenshot_to_file command, see man mpv(1)."""
        # The filename is encoded with fs_enc (presumably the filesystem encoding)
        # rather than plain UTF-8.
        self.command('screenshot_to_file', filename.encode(fs_enc), includes)
    def screenshot_raw(self, includes='subtitles'):
        """Mapped mpv screenshot_raw command, see man mpv(1). Returns a pillow Image object."""
        from PIL import Image
        res = self.node_command('screenshot-raw', includes)
        if res['format'] != 'bgr0':
            raise ValueError('Screenshot in unknown format "{}". Currently, only bgr0 is supported.'
                    .format(res['format']))
        # NOTE(review): the width is derived from stride/4, which may include row
        # padding beyond the visible width -- verify whether a crop to res['w'] is needed.
        img = Image.frombytes('RGBA', (res['stride'] // 4, res['h']), res['data'])
        # bgr0 carries no meaningful alpha; the channel split discards it and
        # reorders BGR to RGB.
        b, g, r, a = img.split()
        return Image.merge('RGB', (r, g, b))
def allocate_overlay_id(self):
free_ids = set(range(64)) - self.overlay_ids
if not free_ids:
raise IndexError('All overlay IDs are in use')
next_id, *_ = sorted(free_ids)
self.overlay_ids.add(next_id)
return next_id
    def free_overlay_id(self, overlay_id):
        """Release a previously allocated overlay ID back into the free pool.
        Does not touch self.overlays; remove_overlay() performs the full teardown."""
        self.overlay_ids.remove(overlay_id)
    def create_file_overlay(self, filename=None, size=None, stride=None, pos=(0, 0)):
        """Allocate an overlay ID, register and return a FileOverlay bound to it."""
        overlay_id = self.allocate_overlay_id()
        overlay = FileOverlay(self, overlay_id, filename, size, stride, pos)
        self.overlays[overlay_id] = overlay
        return overlay
    def create_image_overlay(self, img=None, pos=(0, 0)):
        """Allocate an overlay ID, register and return an ImageOverlay bound to it."""
        overlay_id = self.allocate_overlay_id()
        overlay = ImageOverlay(self, overlay_id, img, pos)
        self.overlays[overlay_id] = overlay
        return overlay
    def remove_overlay(self, overlay_id):
        """Remove the given overlay from the player, free its ID and deregister it."""
        self.overlay_remove(overlay_id)
        self.free_overlay_id(overlay_id)
        del self.overlays[overlay_id]
    def playlist_next(self, mode='weak'):
        """Mapped mpv playlist_next command, see man mpv(1).
        mode -- e.g. 'weak' or 'force'
        """
        self.command('playlist_next', mode)
    def playlist_prev(self, mode='weak'):
        """Mapped mpv playlist_prev command, see man mpv(1).
        mode -- e.g. 'weak' or 'force'
        """
        self.command('playlist_prev', mode)
    def playlist_play_index(self, idx):
        """Mapped mpv playlist-play-index command, see man mpv(1).
        idx -- playlist position index to start playing
        """
        self.command('playlist-play-index', idx)
@staticmethod
def _encode_options(options):
return ','.join('{}={}'.format(_py_to_mpv(str(key)), str(val)) for key, val in options.items())
    def loadfile(self, filename, mode='replace', **options):
        """Mapped mpv loadfile command, see man mpv(1).
        Keyword arguments are serialized via _encode_options into the per-file
        options argument of the command.
        """
        self.command('loadfile', filename.encode(fs_enc), mode, MPV._encode_options(options))
    def loadlist(self, playlist, mode='replace'):
        """Mapped mpv loadlist command, see man mpv(1).
        mode -- e.g. 'replace' or 'append'
        """
        self.command('loadlist', playlist.encode(fs_enc), mode)
    def playlist_clear(self):
        """Mapped mpv playlist_clear command, see man mpv(1)."""
        # Clears every playlist entry except the currently playing one.
        self.command('playlist_clear')
    def playlist_remove(self, index='current'):
        """Mapped mpv playlist_remove command, see man mpv(1).
        index -- playlist position to remove, or 'current' (the default)
        """
        self.command('playlist_remove', index)
    def playlist_move(self, index1, index2):
        """Mapped mpv playlist_move command, see man mpv(1).
        Moves the entry at *index1* to position *index2*.
        """
        self.command('playlist_move', index1, index2)
    def playlist_shuffle(self):
        """Mapped mpv playlist-shuffle command, see man mpv(1)."""
        # Counterpart of playlist_unshuffle() below.
        self.command('playlist-shuffle')
    def playlist_unshuffle(self):
        """Mapped mpv playlist-unshuffle command, see man mpv(1)."""
        # Undoes a previous playlist_shuffle().
        self.command('playlist-unshuffle')
    def run(self, command, *args):
        """Mapped mpv run command, see man mpv(1).
        command -- external program to execute; *args are passed to it
        """
        self.command('run', command, *args)
    def quit(self, code=None):
        """Mapped mpv quit command, see man mpv(1).
        code -- optional process exit code; dropped from the command when None
        """
        self.command('quit', code)
    def quit_watch_later(self, code=None):
        """Mapped mpv quit_watch_later command, see man mpv(1).
        code -- optional process exit code; dropped from the command when None
        """
        self.command('quit_watch_later', code)
def stop(self, keep_playlist=False):
"""Mapped mpv stop command, see man mpv(1)."""
if keep_playlist:
self.command('stop', 'keep-playlist')
else:
self.command('stop')
    def audio_add(self, url, flags='select', title=None, lang=None):
        """Mapped mpv audio_add command, see man mpv(1)."""
        # None-valued optionals are dropped so mpv only sees explicitly given args.
        self.command('audio_add', url.encode(fs_enc), *_drop_nones(flags, title, lang))
    def audio_remove(self, audio_id=None):
        """Mapped mpv audio_remove command, see man mpv(1).
        audio_id -- track to remove; None is dropped by command()
        """
        self.command('audio_remove', audio_id)
    def audio_reload(self, audio_id=None):
        """Mapped mpv audio_reload command, see man mpv(1).
        audio_id -- track to reload; None is dropped by command()
        """
        self.command('audio_reload', audio_id)
    def video_add(self, url, flags='select', title=None, lang=None):
        """Mapped mpv video_add command, see man mpv(1)."""
        # None-valued optionals are dropped so mpv only sees explicitly given args.
        self.command('video_add', url.encode(fs_enc), *_drop_nones(flags, title, lang))
    def video_remove(self, video_id=None):
        """Mapped mpv video_remove command, see man mpv(1).
        video_id -- track to remove; None is dropped by command()
        """
        self.command('video_remove', video_id)
    def video_reload(self, video_id=None):
        """Mapped mpv video_reload command, see man mpv(1).
        video_id -- track to reload; None is dropped by command()
        """
        self.command('video_reload', video_id)
def sub_add(self, url, flags='select', title=None, lang=None):
    """Load an external subtitle file from *url*; mpv ``sub_add``, see mpv(1)."""
    extra = _drop_nones(flags, title, lang)
    self.command('sub_add', url.encode(fs_enc), *extra)
def sub_remove(self, sub_id=None):
    """Remove subtitle track *sub_id* (current track when None); mpv ``sub_remove``, see mpv(1)."""
    self.command(*('sub_remove', sub_id))
def sub_reload(self, sub_id=None):
    """Reload subtitle track *sub_id* (current track when None); mpv ``sub_reload``, see mpv(1)."""
    self.command(*('sub_reload', sub_id))
def sub_step(self, skip):
    """Step *skip* subtitle events forward/backward; mpv ``sub_step``, see mpv(1)."""
    self.command(*('sub_step', skip))
def sub_seek(self, skip):
    """Seek to the subtitle event *skip* steps away; mpv ``sub_seek``, see mpv(1)."""
    self.command(*('sub_seek', skip))
def toggle_osd(self):
    """Cycle OSD visibility; mpv ``osd``, see mpv(1)."""
    cmd = self.command
    cmd('osd')
def print_text(self, text):
    """Print *text* to mpv's stdout; mpv ``print-text``, see mpv(1)."""
    self.command(*('print-text', text))
def show_text(self, string, duration='-1', level=None):
    """Show *string* on the OSD for *duration* ms at OSD *level*; mpv ``show_text``, see mpv(1)."""
    args = ('show_text', string, duration, level)
    self.command(*args)
def expand_text(self, text):
    """Expand mpv property placeholders in *text* and return the result; mpv ``expand-text``, see mpv(1)."""
    return self.node_command(*('expand-text', text))
def expand_path(self, path):
    """Expand mpv path shorthands (e.g. ``~~/``) in *path* and return the result; mpv ``expand-path``, see mpv(1)."""
    return self.node_command(*('expand-path', path))
def show_progress(self):
    """Show the progress OSD (position/duration); mpv ``show_progress``, see mpv(1)."""
    cmd = self.command
    cmd('show_progress')
def rescan_external_files(self, mode='reselect'):
    """Rescan external files (subs, audio) for the current file; mpv ``rescan-external-files``, see mpv(1)."""
    self.command(*('rescan-external-files', mode))
def discnav(self, command):
    """Send disc (DVD/BD) navigation *command*; mpv ``discnav``, see mpv(1)."""
    self.command(*('discnav', command))
def mouse(self, x, y, button=None, mode='single'):
    """Send a mouse event at position (x, y); mpv ``mouse``, see mpv(1).

    button is the mouse button number; when omitted, only the position is
    updated. mode selects 'single' or 'double' click.

    Fix: the original definition was missing the ``self`` parameter, so a
    method call shifted every argument by one and passed the instance as x.
    """
    if button is None:
        self.command('mouse', x, y, mode)
    else:
        self.command('mouse', x, y, button, mode)
def keypress(self, name):
    """Simulate pressing and releasing key *name*; mpv ``keypress``, see mpv(1)."""
    self.command(*('keypress', name))
def keydown(self, name):
    """Simulate holding down key *name* (pair with ``keyup``); mpv ``keydown``, see mpv(1)."""
    self.command(*('keydown', name))
def keyup(self, name=None):
    """Release key *name*, or every held key when *name* is None; mpv ``keyup``, see mpv(1)."""
    args = () if name is None else (name,)
    self.command('keyup', *args)
def keybind(self, name, command):
    """Bind key *name* to input *command*; mpv ``keybind``, see mpv(1)."""
    self.command(*('keybind', name, command))
def write_watch_later_config(self):
    """Persist the current playback position as a watch-later config; mpv ``write_watch_later_config``, see mpv(1)."""
    cmd = self.command
    cmd('write_watch_later_config')
def overlay_add(self, overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride):
    """Place raw bitmap overlay *overlay_id* at (x, y); mpv ``overlay_add``, see mpv(1)."""
    args = (overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride)
    self.command('overlay_add', *args)
def overlay_remove(self, overlay_id):
    """Remove bitmap overlay *overlay_id*; mpv ``overlay_remove``, see mpv(1)."""
    self.command(*('overlay_remove', overlay_id))
def script_message(self, *args):
    """Broadcast a script message with *args* to all listeners; mpv ``script_message``, see mpv(1)."""
    self.command(*(('script_message',) + args))
def script_message_to(self, target, *args):
    """Send a script message with *args* to the listener named *target*; mpv ``script_message_to``, see mpv(1)."""
    self.command(*(('script_message_to', target) + args))
def observe_property(self, name, handler):
    """Register an observer on the named property. An observer is a function that is called with the new property
    value every time the property's value is changed. The basic function signature is ``fun(property_name,
    new_value)`` with new_value being the decoded property value as a python object. This function can be used as a
    function decorator if no handler is given.

    To unregister the observer, call either of ``mpv.unobserve_property(name, handler)``,
    ``mpv.unobserve_all_properties(handler)`` or the handler's ``unregister_mpv_properties`` attribute::

        @player.observe_property('volume')
        def my_handler(new_volume, *):
            print("It's loud!", volume)

        my_handler.unregister_mpv_properties()
    """
    self._property_handlers[name].append(handler)
    # The masked 64-bit hash of the property name doubles as the libmpv
    # reply-userdata, letting incoming events be routed back to the handlers
    # registered for that name. NODE format returns decoded python objects.
    _mpv_observe_property(self._event_handle, hash(name) & 0xffffffffffffffff, name.encode('utf-8'), MpvFormat.NODE)
def property_observer(self, name):
    """Decorator form of ``MPV.observe_property``; additionally attaches an
    ``unobserve_mpv_properties`` attribute to the decorated function for
    later deregistration."""
    def attach(fun):
        self.observe_property(name, fun)
        fun.unobserve_mpv_properties = lambda: self.unobserve_property(name, fun)
        return fun
    return attach
def unobserve_property(self, name, handler):
    """Remove *handler* from the observers of property *name*.

    Both the property name and the originally registered handler are needed
    because one handler may observe several properties; use
    ``unobserve_all_properties`` to drop a handler everywhere. The
    libmpv-level observer is only torn down once the last python handler for
    this property is gone.
    """
    handlers = self._property_handlers[name]
    handlers.remove(handler)
    if not handlers:
        _mpv_unobserve_property(self._event_handle, hash(name) & 0xffffffffffffffff)
def unobserve_all_properties(self, handler):
    """Detach *handler* from every property it currently observes."""
    for prop_name in self._property_handlers:
        self.unobserve_property(prop_name, handler)
def register_message_handler(self, target, handler=None):
    """Register *handler* for mpv script messages addressed to *target*.

    Used to communicate with embedded lua scripts. WARNING: only one handler
    can be registered at a time for any given target; registering again
    replaces the previous handler. For decorator-style registration with an
    ``unregister_mpv_messages`` attribute, use ``MPV.message_handler``::

        player = mpv.MPV()

        @player.message_handler('foo')
        def my_handler(some, args):
            print(args)

        my_handler.unregister_mpv_messages()
    """
    self._register_message_handler_internal(target, handler)
def _register_message_handler_internal(self, target, handler):
    """Store *handler* as the (single) script-message handler for *target*."""
    self._message_handlers[target] = handler
def unregister_message_handler(self, target_or_handler):
    """Unregister a script message handler, either by target name or by the
    handler function itself. The ``unregister_mpv_messages`` attribute set on
    decorated handlers calls this with the handler.

    Fix: the original deleted entries from ``self._message_handlers`` while
    iterating ``dict.items()``, which raises RuntimeError on python 3; we
    collect the matching keys first and delete afterwards.
    """
    if isinstance(target_or_handler, str):
        del self._message_handlers[target_or_handler]
    else:
        stale = [key for key, val in self._message_handlers.items()
                 if val == target_or_handler]
        for key in stale:
            del self._message_handlers[key]
def message_handler(self, target):
    """Decorator registering the decorated function as script-message handler
    for *target*; also attaches an ``unregister_mpv_messages`` attribute.

    WARNING: Only one handler can be registered at a time for any given target.

    Usage::

        player = mpv.MPV()

        @player.message_handler('foo')
        def my_handler(some, args):
            print(args)

        my_handler.unregister_mpv_messages()
    """
    def attach(handler):
        self._register_message_handler_internal(target, handler)
        handler.unregister_mpv_messages = lambda: self.unregister_message_handler(handler)
        return handler
    return attach
def register_event_callback(self, callback):
    """Register *callback* to receive every mpv event (no filtering).

    Remove it again with ``unregister_event_callback``; the filtered
    ``event_callback`` decorator additionally sets an
    ``unregister_mpv_events`` attribute on its wrapper.
    """
    self._event_callbacks += [callback]
def unregister_event_callback(self, callback):
    """Remove a previously registered blanket event callback."""
    self._event_callbacks.remove(callback)
def event_callback(self, *event_types):
    """Function decorator to register a blanket event callback for the given event types. Event types can be given
    as str (e.g. 'start-file'), integer or MpvEventID object.

    WARNING: Due to the way this is filtering events, this decorator cannot be chained with itself.

    To unregister the event callback, call its ``unregister_mpv_events`` function::

        player = mpv.MPV()

        @player.event_callback('shutdown')
        def my_handler(event):
            print('It ded.')

        my_handler.unregister_mpv_events()
    """
    def register(callback):
        # Take the handler lock so registration cannot race with event-thread
        # shutdown; check_core_alive raises if the core is already gone.
        with self._event_handler_lock:
            self.check_core_alive()
            # No event_types given means "match any event".
            types = [MpvEventID.from_str(t) if isinstance(t, str) else t for t in event_types] or MpvEventID.ANY
            @wraps(callback)
            def wrapper(event, *args, **kwargs):
                # Forward only events whose id is in the requested set.
                if event['event_id'] in types:
                    callback(event, *args, **kwargs)
            self._event_callbacks.append(wrapper)
            # partial keeps a handle for deregistration of the *wrapper*,
            # not the raw callback (which was never registered directly).
            wrapper.unregister_mpv_events = partial(self.unregister_event_callback, wrapper)
            return wrapper
    return register
@staticmethod
def _binding_name(callback_or_cmd):
    """Derive the mpv input-section name used for a python-side key binding."""
    masked = hash(callback_or_cmd) & 0xffffffffffffffff
    return 'py_kb_{:016x}'.format(masked)
def on_key_press(self, keydef, mode='force'):
    """Function decorator to register a simplified key binding. The callback is called whenever the key given is
    *pressed*.

    To unregister the callback function, you can call its ``unregister_mpv_key_bindings`` attribute::

        player = mpv.MPV()

        @player.on_key_press('Q')
        def binding():
            print('blep')

        binding.unregister_mpv_key_bindings()

    WARNING: For a single keydef only a single callback/command can be registered at the same time. If you register
    a binding multiple times older bindings will be overwritten and there is a possibility of references leaking. So
    don't do that.

    The BIG FAT WARNING regarding untrusted keydefs from the key_binding method applies here as well.
    """
    def register(fun):
        # Delegate to the low-level key_binding; the wrapper adapts the
        # low-level (state, name, char) signature to a no-argument callback.
        @self.key_binding(keydef, mode)
        @wraps(fun)
        def wrapper(state='p-', name=None, char=None):
            # 'd' = key down, 'p' = key press; ignore release/repeat states.
            if state[0] in ('d', 'p'):
                fun()
        return wrapper
    return register
def key_binding(self, keydef, mode='force'):
    """Function decorator to register a low-level key binding.

    The callback function signature is ``fun(key_state, key_name)`` where ``key_state`` is either ``'U'`` for "key
    up" or ``'D'`` for "key down".

    The keydef format is: ``[Shift+][Ctrl+][Alt+][Meta+]<key>`` where ``<key>`` is either the literal character the
    key produces (ASCII or Unicode character), or a symbolic name (as printed by ``mpv --input-keylist``).

    To unregister the callback function, you can call its ``unregister_mpv_key_bindings`` attribute::

        player = mpv.MPV()

        @player.key_binding('Q')
        def binding(state, name, char):
            print('blep')

        binding.unregister_mpv_key_bindings()

    WARNING: For a single keydef only a single callback/command can be registered at the same time. If you register
    a binding multiple times older bindings will be overwritten and there is a possibility of references leaking. So
    don't do that.

    BIG FAT WARNING: mpv's key binding mechanism is pretty powerful. This means, you essentially get arbitrary code
    exectution through key bindings. This interface makes some limited effort to sanitize the keydef given in the
    first parameter, but YOU SHOULD NOT RELY ON THIS IN FOR SECURITY. If your input comes from config files, this is
    completely fine--but, if you are about to pass untrusted input into this parameter, better double-check whether
    this is secure in your case.
    """
    def register(fun):
        # One function may be bound to several keydefs; accumulate them so
        # unregister_all can tear every binding down.
        fun.mpv_key_bindings = getattr(fun, 'mpv_key_bindings', []) + [keydef]
        def unregister_all():
            for keydef in fun.mpv_key_bindings:
                self.unregister_key_binding(keydef)
        fun.unregister_mpv_key_bindings = unregister_all
        self.register_key_binding(keydef, fun, mode)
        return fun
    return register
def register_key_binding(self, keydef, callback_or_cmd, mode='force'):
"""Register a key binding. This takes an mpv keydef and either a string containing a mpv command or a python
callback function. See ``MPV.key_binding`` for details.
"""
if not re.match(r'(Shift+)?(Ctrl+)?(Alt+)?(Meta+)?(.|\w+)', keydef):
raise ValueError('Invalid keydef. Expected format: [Shift+][Ctrl+][Alt+][Meta+]<key>\n'
'<key> is either the literal character the key produces (ASCII or Unicode character), or a '
'symbolic name (as printed by --input-keylist')
binding_name = MPV._binding_name(keydef)
if callable(callback_or_cmd):
self._key_binding_handlers[binding_name] = callback_or_cmd
self.register_message_handler('key-binding', self._handle_key_binding_message)
self.command('define-section',
binding_name, '{} script-binding py_event_handler/{}'.format(keydef, binding_name), mode)
elif isinstance(callback_or_cmd, str):
self.command('define-section', binding_name, '{} {}'.format(keydef, callback_or_cmd), mode)
else:
raise TypeError('register_key_binding expects either an str with an mpv command or a python callable.')
self.command('enable-section', binding_name, 'allow-hide-cursor+allow-vo-dragging')
def _handle_key_binding_message(self, binding_name, key_state, key_name=None, key_char=None):
    """Dispatch a 'key-binding' script message to the python handler registered under *binding_name*."""
    handler = self._key_binding_handlers[binding_name]
    handler(key_state, key_name, key_char)
def unregister_key_binding(self, keydef):
    """Unregister a key binding by keydef."""
    binding_name = MPV._binding_name(keydef)
    self.command('disable-section', binding_name)
    # Redefining the section with an empty body effectively deletes it.
    self.command('define-section', binding_name, '')
    if binding_name in self._key_binding_handlers:
        del self._key_binding_handlers[binding_name]
        # Once the last python-side binding is gone, the shared
        # 'key-binding' script-message handler is no longer needed.
        if not self._key_binding_handlers:
            self.unregister_message_handler('key-binding')
def register_stream_protocol(self, proto, open_fn=None):
    """Register a custom stream protocol as documented in libmpv/stream_cb.h:
    https://github.com/mpv-player/mpv/blob/master/libmpv/stream_cb.h

    proto is the protocol scheme, e.g. "foo" for "foo://" urls. This function
    can either be used with two parameters or as a decorator on the target
    function. open_fn is a function taking an URI string and returning an mpv
    stream object; it may raise ValueError to signal libmpv the URI could not
    be opened.

    The mpv stream protocol is as follows::

        class Stream:
            @property
            def size(self):
                return None  # unknown size
                return size  # int with size in bytes

            def read(self, size):
                ...
                return read  # non-empty bytes object with input
                return b''   # empty bytes object signals permanent EOF

            def seek(self, pos):  # optional
                return new_offset  # integer with new byte offset. The new
                                   # offset may be before the requested offset
                                   # in case an exact seek is inconvenient.

            def close(self):  # optional
                ...

            # def cancel(self): (future API versions only)
            #     Abort a running read() or seek() operation
            #     ...

    Fix: removed a leftover debug ``print('[test]open backend')`` that fired
    on every stream open.
    """
    def decorator(open_fn):
        @StreamOpenFn
        def open_backend(_userdata, uri, cb_info):
            try:
                frontend = open_fn(uri.decode('utf-8'))
            except ValueError:
                # Stream could not be opened; tell libmpv loading failed.
                return ErrorCode.LOADING_FAILED
            def read_backend(_userdata, buf, bufsize):
                # Copy the python bytes into libmpv's C buffer.
                data = frontend.read(bufsize)
                for i in range(len(data)):
                    buf[i] = data[i]
                return len(data)
            cb_info.contents.cookie = None
            read = cb_info.contents.read = StreamReadFn(read_backend)
            close = cb_info.contents.close = StreamCloseFn(lambda _userdata: frontend.close())
            seek, size, cancel = None, None, None
            # seek/size are optional parts of the stream protocol.
            if hasattr(frontend, 'seek'):
                seek = cb_info.contents.seek = StreamSeekFn(lambda _userdata, offx: frontend.seek(offx))
            if hasattr(frontend, 'size') and frontend.size is not None:
                size = cb_info.contents.size = StreamSizeFn(lambda _userdata: frontend.size)
            # Future API versions only
            # if hasattr(frontend, 'cancel'):
            #     cb_info.contents.cancel = StreamCancelFn(lambda _userdata: frontend.cancel())
            # Keep the frontend and ctypes callbacks alive for the lifetime
            # of the stream; otherwise they would be garbage collected while
            # libmpv still holds pointers to them. (TODO: free on close)
            frontend._registered_callbacks = [read, close, seek, size, cancel]
            self._stream_protocol_frontends[proto][uri] = frontend
            return 0
        if proto in self._stream_protocol_cbs:
            raise KeyError('Stream protocol already registered')
        # The open callback itself must also outlive this call.
        self._stream_protocol_cbs[proto] = [open_backend]
        _mpv_stream_cb_add_ro(self.handle, proto.encode('utf-8'), c_void_p(), open_backend)
        return open_fn
    if open_fn is not None:
        decorator(open_fn)
    return decorator
# Convenience functions
def play(self, filename):
    """Play a path or URL (URLs require the ``ytdl`` option to be set)."""
    self.loadfile(filename)
@property
def playlist_filenames(self):
    """File names/URLs of all playlist entries, in playlist order."""
    return [entry['filename'] for entry in self.playlist]
def playlist_append(self, filename, **options):
    """Append a path or URL to the playlist without starting playback.

    To append and start playing, use ``MPV.loadfile(filename, 'append-play')``.
    """
    self.loadfile(filename, 'append', **options)
# "Python stream" logic. This is some porcelain for directly playing data from python generators.
def _python_stream_open(self, uri):
    """Resolve a ``python://`` URI to a GeneratorStream, consulting the named
    streams registered via @python_stream(...) first and falling back to the
    @python_stream_catchall handler."""
    stream_name, = re.fullmatch('python://(.*)', uri).groups()
    try:
        generator_fun, size = self._python_streams[stream_name]
    except KeyError:
        if self._python_stream_catchall is None:
            raise ValueError('Python stream name not found and no catch-all defined')
        generator_fun, size = self._python_stream_catchall(stream_name)
    return GeneratorStream(generator_fun, size)
def python_stream(self, name=None, size=None):
    """Register a generator-backed stream reachable as ``python://<name>``.

    name is the part after "python://" in the URI this generator is registered
    as; size is the total number of bytes in the stream, or None if unknown.

    Any given name can only be registered once (the catch-all likewise); to
    unregister, call the ``.unregister`` attribute set on the decorated
    generator. The generator signals EOF by returning, manually raising
    StopIteration, or yielding b''. It may be called multiple times if libmpv
    seeks or loops.

    See also: @mpv.python_stream_catchall

        @mpv.python_stream('foobar')
        def reader():
            for chunk in chunks:
                yield chunk

        mpv.play('python://foobar')
        mpv.wait_for_playback()
        reader.unregister()
    """
    def decorate(generator_fun):
        if name in self._python_streams:
            raise KeyError('Python stream name "{}" is already registered'.format(name))
        self._python_streams[name] = (generator_fun, size)
        def unregister():
            # Sanity check: refuse to unregister a stale handle (name gone or
            # re-registered with a different generator in the meantime).
            entry = self._python_streams.get(name)
            if entry is None or entry[0] is not generator_fun:
                raise RuntimeError('Python stream has already been unregistered')
            del self._python_streams[name]
        generator_fun.unregister = unregister
        return generator_fun
    return decorate
def python_stream_catchall(self, cb):
    """Register *cb* as the fallback for ``python://`` URIs with no named stream.

    cb takes the stream name and returns a (generator, size) tuple, with size
    being None if unknown; raise ValueError inside cb to signal libmpv an
    invalid URI. Only one catch-all can be registered at a time; remove it via
    the ``unregister`` attribute set on cb.

    See also: @mpv.python_stream(name, size)

        @mpv.python_stream_catchall
        def catchall(name):
            if not name.startswith('foo'):
                raise ValueError('Unknown Name')

            def foo_reader():
                with open(name, 'rb') as f:
                    while True:
                        chunk = f.read(1024)
                        if not chunk:
                            break
                        yield chunk
            return foo_reader, None

        mpv.play('python://foo23')
        mpv.wait_for_playback()
        catchall.unregister()
    """
    if self._python_stream_catchall is not None:
        raise KeyError('A catch-all python stream is already registered')
    self._python_stream_catchall = cb
    def unregister():
        if self._python_stream_catchall is not cb:
            raise RuntimeError('This catch-all python stream has already been unregistered')
        self._python_stream_catchall = None
    cb.unregister = unregister
    return cb
# Property accessors
def _get_property(self, name, decoder=strict_decoder, fmt=MpvFormat.NODE):
    """Low-level property read; returns the decoded python value, or None when
    the property is currently unavailable (e.g. no file loaded).

    fmt must be MpvFormat.NODE or MpvFormat.OSD_STRING; decoder controls how
    NODE string payloads are turned into python objects.
    """
    self.check_core_alive()
    # Stack buffer large enough to receive an MpvNode by pointer.
    out = create_string_buffer(sizeof(MpvNode))
    try:
        cval = _mpv_get_property(self.handle, name.encode('utf-8'), fmt, out)
        if fmt is MpvFormat.OSD_STRING:
            return cast(out, POINTER(c_char_p)).contents.value.decode('utf-8')
        elif fmt is MpvFormat.NODE:
            rv = cast(out, POINTER(MpvNode)).contents.node_value(decoder=decoder)
            # Free the libmpv-allocated node contents once decoded into
            # plain python objects.
            _mpv_free_node_contents(out)
            return rv
        else:
            raise TypeError('_get_property only supports NODE and OSD_STRING formats.')
    except PropertyUnavailableError as ex:
        # The property exists but has no value right now; map to None.
        return None
def _set_property(self, name, value):
    """Low-level property write.

    Containers are marshalled as an mpv NODE; every other value goes through
    the string interface after coercion by ``_mpv_coax_proptype``.
    """
    self.check_core_alive()
    ename = name.encode('utf-8')
    if isinstance(value, (list, set, dict)):
        # _1/_2/_3 keep the intermediate ctypes objects alive until the call
        # below has consumed the node list.
        _1, _2, _3, pointer = _make_node_str_list(value)
        _mpv_set_property(self.handle, ename, MpvFormat.NODE, pointer)
    else:
        _mpv_set_property_string(self.handle, ename, _mpv_coax_proptype(value))
def __getattr__(self, name):
    """Map unknown attribute reads to mpv property reads (underscores become
    dashes via _py_to_mpv); only called when normal lookup fails."""
    return self._get_property(_py_to_mpv(name), lazy_decoder)
def __setattr__(self, name, value):
    """Map attribute writes to mpv property writes.

    'handle' and underscore-prefixed names are real instance attributes and
    are set normally.
    """
    try:
        if name != 'handle' and not name.startswith('_'):
            self._set_property(_py_to_mpv(name), value)
        else:
            super().__setattr__(name, value)
    except AttributeError:
        # During early __init__ the helpers may not exist yet; fall back to
        # a plain attribute assignment.
        super().__setattr__(name, value)
def __dir__(self):
    """Extend dir() with all mpv property names (dashes mapped to underscores
    so they line up with the __getattr__/__setattr__ mapping)."""
    return super().__dir__() + [name.replace('-', '_') for name in self.property_list]
@property
def properties(self):
    """Mapping of every known mpv property name to its option metadata."""
    return {prop: self.option_info(prop) for prop in self.property_list}
# Dict-like option access
def __getitem__(self, name, file_local=False):
    """Dict-style read of an mpv option value."""
    namespace = 'file-local-options/' if file_local else 'options/'
    return self._get_property(namespace + name, lazy_decoder)
def __setitem__(self, name, value, file_local=False):
    """Dict-style write of an mpv option value."""
    namespace = 'file-local-options/' if file_local else 'options/'
    return self._set_property(namespace + name, value)
def __iter__(self):
    """Iterate over all mpv option names."""
    yield from self.options
def option_info(self, name):
    """Return metadata for option *name*, or None when it is unknown."""
    try:
        return self._get_property('option-info/' + name)
    except AttributeError:
        return None
class MpvRenderContext:
    """Thin wrapper around libmpv's render API (render.h).

    Parameters beyond api_type are passed through as mpv render params; the
    instance exposes render params as attributes via __getattr__/__setattr__.
    """
    def __init__(self, mpv, api_type, **kwargs):
        self._mpv = mpv
        kwargs['api_type'] = api_type
        # Allocate space for the opaque render-context handle and let libmpv
        # fill it in.
        buf = cast(create_string_buffer(sizeof(MpvRenderCtxHandle)), POINTER(MpvRenderCtxHandle))
        _mpv_render_context_create(buf, mpv.handle, kwargs_to_render_param_array(kwargs))
        self._handle = buf.contents
    def free(self):
        """Destroy the render context; the object must not be used afterwards."""
        _mpv_render_context_free(self._handle)
    def __setattr__(self, name, value):
        # Private attributes are stored normally; everything else is routed
        # to libmpv as a render parameter.
        if name.startswith('_'):
            super().__setattr__(name, value)
        elif name == 'update_cb':
            # Keep python references to both the callback and its ctypes
            # wrapper so they are not garbage collected while libmpv holds
            # the function pointer.
            func = value if value else (lambda: None)
            self._update_cb = value
            self._update_fn_wrapper = RenderUpdateFn(lambda _userdata: func())
            _mpv_render_context_set_update_callback(self._handle, self._update_fn_wrapper, None)
        else:
            param = MpvRenderParam(name, value)
            _mpv_render_context_set_parameter(self._handle, param)
    def __getattr__(self, name):
        if name == 'update_cb':
            return self._update_cb
        elif name == 'handle':
            return self._handle
        # Any other attribute is fetched from libmpv via
        # mpv_render_context_get_info into a freshly allocated buffer.
        param = MpvRenderParam(name)
        data_type = type(param.data.contents)
        buf = cast(create_string_buffer(sizeof(data_type)), POINTER(data_type))
        param.data = buf
        _mpv_render_context_get_info(self._handle, param)
        return buf.contents.as_dict()
    def update(self):
        """ Calls mpv_render_context_update and returns the MPV_RENDER_UPDATE_FRAME flag (see render.h) """
        return bool(_mpv_render_context_update(self._handle) & 1)
    def render(self, **kwargs):
        # kwargs are converted to an mpv_render_param array (e.g. opengl_fbo).
        _mpv_render_context_render(self._handle, kwargs_to_render_param_array(kwargs))
    def report_swap(self):
        # Tell libmpv the displayed frame was swapped to screen (vsync timing).
        _mpv_render_context_report_swap(self._handle)
def register_protocol(handle, proto, func):
_mpv_stream_cb_add_ro(handle, proto.encode('utf-8'), c_void_p(), func) |
server.py | # -*- coding: utf-8 -*-
import os
import urllib.request
from urllib.error import URLError
from multiprocessing import Process
def PATH(p):
    """Return *p* as an absolute path, resolved relative to this file's directory.

    Fix: was a lambda assigned to a name; PEP 8 prefers ``def`` so the
    function gets a proper name in tracebacks.
    """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), p))
import threading
class AppiumServer:
    """Manage one appium server process per configured device.

    l_devices["appium"] is expected to be a list of dicts with a "config"
    entry (the launch command line) and a "port" entry (the HTTP port).
    """
    def __init__(self, l_devices):
        self.l_devices = l_devices

    def start_server(self):
        """Start one appium server per configured device.

        Fix: the original passed ``target=t1.start()``, which *called*
        start() immediately in the parent and handed Process a None target,
        so the child process did nothing.
        """
        for entry in self.l_devices["appium"]:
            runner = RunServer(entry["config"])
            p = Process(target=runner.start)
            p.start()

    def stop_server(self):
        """Stop all appium servers by killing every node.exe (Windows-only,
        heavy-handed: kills unrelated node processes too)."""
        os.system('taskkill /f /im node.exe')

    def re_start_server(self):
        """Stop all appium servers, then start them again."""
        self.stop_server()
        self.start_server()

    def is_runnnig(self):
        """Return True when *every* configured server answers /status with 2xx.

        (Method name typo kept for backward compatibility with callers.)

        Fixes: the URL had a stray leading space making it invalid, and the
        loop returned after probing only the first device.
        """
        for entry in self.l_devices["appium"]:
            url = "http://127.0.0.1:" + str(entry["port"]) + "/wd/hub/status"
            response = None
            try:
                response = urllib.request.urlopen(url, timeout=5)
                # Any non-2xx status means this server is not healthy.
                if not str(response.getcode()).startswith("2"):
                    return False
            except URLError:
                return False
            finally:
                if response:
                    response.close()
        # Every configured server answered successfully.
        return True
class RunServer(threading.Thread):
    """Thread that blocks on ``os.system`` running the given appium command line."""
    def __init__(self, cmd):
        super().__init__()
        self.cmd = cmd

    def run(self):
        # Blocks until the launched appium process exits.
        os.system(self.cmd)
# if __name__ == "__main__":
#
# oo = AppiumServer()
# oo.start_server()
# print("strart server")
# print("running server")
# oo.stop_server()
# print("stop server") |
test_autograd.py | # Owner(s): ["module: autograd"]
import contextlib
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
import operator
import subprocess
from copy import deepcopy
from collections import OrderedDict
from itertools import product
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoLapack, slowTest, IS_WINDOWS, IS_MACOS,
disable_gc, gradcheck, gradgradcheck, parametrize,
instantiate_parametrized_tests, skipIfMps)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available, _calculate_shape
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import mask_not_all_zeros
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipMeta, dtypesIfMPS)
from torch.testing._internal.common_dtype import floating_types_and
from torch.utils._mode_utils import no_dispatch
import pickle
def graph_desc(fn):
    """Render an autograd graph rooted at *fn* as a nested "Type(child, ...)" string."""
    if fn is None:
        return 'None'
    children = [graph_desc(child) for child, _ in fn.next_functions]
    return '{}({})'.format(type(fn).__name__, ', '.join(children))
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
    """Accessing .grad warns exactly once: on a non-leaf tensor that does not
    retain gradients; leaves and retain_grad() tensors must not warn."""
    dummy = torch.empty(1)
    with warnings.catch_warnings(record=True) as w:
        # Accessing .grad on leaf
        dummy.requires_grad_()
        foo = dummy.grad
        self.assertEqual(len(w), 0)
        # Accessing .grad on non-leaf
        dummy = dummy.clone()
        foo = dummy.grad
        self.assertEqual(len(w), 1)
        # Accessing .grad on non-leaf that retains gradients
        dummy.retain_grad()
        foo = dummy.grad
        self.assertEqual(len(w), 1)
def _function_test(self, cls):
    """Shared driver: apply custom Function *cls* to (tensor, scalar, tensor),
    backprop with create_graph=True, check the expected analytic gradients,
    and return the two leaf inputs for further graph inspection."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    result = cls.apply(x, 2, y)
    go = torch.ones((), requires_grad=True)
    result.sum().backward(go, create_graph=True)
    # d/dx (x + 2y + xy) = 1 + y ; d/dy (x + 2y + xy) = 2 + x
    self.assertEqual(x.grad, y + torch.ones(5, 5))
    self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
    # create_graph=True means the grads themselves are differentiable.
    self.assertIsNotNone(x.grad.grad_fn)
    self.assertIsNotNone(y.grad.grad_fn)
    return x, y
def test_function(self):
    """A double-differentiable custom Function: checks saved tensors, scalar
    ctx state, gradient values, and the recorded backward graph structure."""
    class MyFunction(Function):

        @staticmethod
        def forward(ctx, tensor1, pyscalar, tensor2):
            ctx.pyscalar = pyscalar
            ctx.save_for_backward(tensor1, tensor2)
            return tensor1 + pyscalar * tensor2 + tensor1 * tensor2

        @staticmethod
        def backward(ctx, grad_output):
            var1, var2 = ctx.saved_tensors
            # NOTE: self is the test case here
            self.assertIsInstance(var1, torch.Tensor)
            self.assertIsInstance(var2, torch.Tensor)
            self.assertIsInstance(grad_output, torch.Tensor)
            return (grad_output + grad_output * var2, None,
                    grad_output * ctx.pyscalar + grad_output * var1)

    x, y = self._function_test(MyFunction)

    # Compare the grad graphs against stored expected descriptions.
    x_grad_desc = graph_desc(x.grad.grad_fn)
    y_grad_desc = graph_desc(y.grad.grad_fn)
    self.assertExpected(x_grad_desc, "x_grad_desc")
    self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
    """A @once_differentiable backward runs with grad mode disabled, so double
    backward through it records Error nodes in the resulting graph."""
    class MyFunction(Function):

        @staticmethod
        def forward(ctx, tensor1, pyscalar, tensor2):
            ctx.pyscalar = pyscalar
            ctx.save_for_backward(tensor1, tensor2)
            return tensor1 + pyscalar * tensor2 + tensor1 * tensor2

        @staticmethod
        @once_differentiable
        def backward(ctx, grad_output):
            # Inside a once_differentiable backward, grad mode must be off.
            self.assertFalse(torch.is_grad_enabled())
            t1, t2 = ctx.saved_tensors
            return (grad_output + grad_output * t2, None,
                    grad_output * ctx.pyscalar + grad_output * t1)

    x, y = self._function_test(MyFunction)
    self.assertEqual(graph_desc(x.grad.grad_fn),
                     'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
    self.assertEqual(graph_desc(y.grad.grad_fn),
                     'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
    """A custom backward returning None must propagate an undefined gradient
    (leaf .grad stays None), not a zero-filled tensor."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x * 2

        @staticmethod
        def backward(ctx, grad):
            return None

    # Test that undefined tensors returned from custom backward function
    # are propagated as undefined and not tensor full of zeroes
    x = torch.ones(1, requires_grad=True)

    MyFunction.apply(x).backward()
    self.assertIsNone(x.grad)

    MyFunction.apply(x ** 2).backward()
    self.assertIsNone(x.grad)

    MyFunction.apply(x).sum().backward()
    self.assertIsNone(x.grad)

    # autograd.grad with allow_unused must likewise yield None.
    self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
    """By default, undefined incoming gradients are materialized as zeros
    before reaching a custom backward."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, grad):
            # Materialization turns the undefined grad into zeros.
            self.assertEqual(grad, torch.zeros(1))
            return grad

    x = torch.ones(1, requires_grad=True)
    # UndefinedGrad() feeds an undefined gradient into the backward pass.
    torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
    """With set_materialize_grads(False), undefined incoming gradients reach
    the custom backward as None instead of zeros."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.set_materialize_grads(False)
            return x

        @staticmethod
        def backward(ctx, grad):
            # No materialization: the undefined grad arrives as None.
            self.assertIsNone(grad)
            return grad

    x = torch.ones(1, requires_grad=True)
    # UndefinedGrad() feeds an undefined gradient into the backward pass.
    torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
    """Calling a legacy-style Function (non-static forward) must raise the
    deprecation RuntimeError."""
    # Trigger exception
    class MyFunction(Function):
        def forward(self, x):
            return x

        def backward(self, grad_output):
            return grad_output

    # Check exception occurs
    with self.assertRaisesRegex(
            RuntimeError,
            'Legacy autograd function with non-static forward method is deprecated'):
        MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
    """Identity-forward custom Function whose backward always raises; used by
    tests asserting error propagation through the backward pass."""

    @staticmethod
    def forward(ctx, input):
        return input.clone()

    @staticmethod
    @once_differentiable
    def backward(ctx, input):
        # Deliberately fail so callers can assert on the propagated error.
        raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
    """An exception raised inside a custom backward must surface from
    Tensor.backward() with its original message."""
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)

    tmp = (t1 + t2) * (t1 + t2)
    t3 = TestAutograd.SimulateBackwardError.apply(tmp)
    with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
        t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
    """Custom Functions may mix tensor and non-tensor inputs/outputs: the
    non-tensor outputs must be passed through unchanged and receive None
    gradients, while tensor outputs backprop normally (incl. gradcheck)."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, t1, t2, scale, t3):
            t4 = t1 + t2 * t3
            t5 = t1 * t2 + t3
            t4 *= scale
            t5 *= scale

            # Save scale
            ctx.scale = scale
            ctx.save_for_backward(t1, t2, t3)
            return scale, t4, None, True, t5, "bar", t1

        @staticmethod
        @once_differentiable
        def backward(ctx, *grads):
            # Verify grads: one incoming grad per forward output; the
            # non-tensor outputs (scale, None, True, "bar") get None.
            self.assertEqual(7, len(grads))
            self.assertIsNone(grads[0])
            self.assertIsNone(grads[2])
            self.assertIsNone(grads[3])
            self.assertIsNone(grads[5])

            scale = ctx.scale
            var1, var2, var3 = ctx.saved_tensors
            return (
                grads[1] * scale + grads[4] * var2 * scale + grads[6],
                grads[1] * var3 * scale + grads[4] * var1 * scale,
                None,
                grads[1] * var2 * scale + grads[4] * scale,
            )

    t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
    t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
    t3 = torch.rand(10, dtype=torch.double)
    scale = random.randint(0, 10)
    res = MyFunction.apply(t1, t2, scale, t3)
    self.assertEqual(scale, res[0])
    self.assertEqual((t1 + t2 * t3) * scale, res[1])
    self.assertEqual(None, res[2])
    self.assertEqual(True, res[3])
    self.assertEqual((t1 * t2 + t3) * scale, res[4])
    self.assertEqual("bar", res[5])
    self.assertEqual(t1, res[6])

    # Validate running backward.
    torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
    self.assertIsNotNone(t1.grad)
    self.assertIsNotNone(t2.grad)
    # t3 does not require grad, so it must not accumulate one.
    self.assertIsNone(t3.grad)

    # Test gradcheck
    def foo(t1, t2, t3):
        res = MyFunction.apply(t1, t2, scale, t3)
        return res[1], res[4], res[6]

    gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
# test to ensure grad(grad)check runs successfully even if there is an
# unrelated (but differentiable) inputs
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.igamma, choose a different op
torch.igamma(dual_x, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
    """Check when accumulation preserves the identity of ``params.grad``.

    Dense += dense accumulates in place (reference preserved) unless
    ``create_graph=True``; accumulating across layouts (dense into
    sparse) always allocates a fresh gradient tensor.
    """
    def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
        params = torch.tensor([1.5, 1.5]).requires_grad_()
        params.grad = params_grad_tensor
        grad_saved = params.grad
        params.backward(backward_grad_tensor, create_graph=create_graph)
        # Compare object identity, not value: did accumulation reuse the tensor?
        self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)

    for create_graph in (False, True):
        # Accumulate dense gradient to sparse gradient will change the `params.grad` reference
        _test_grad_tensor(
            torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
            torch.tensor([1.5, 1.5]),
            False,  # never accumulates in-place
            create_graph)

        # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
        # but only if create_graph=False.
        _test_grad_tensor(
            torch.tensor([1.5, 1.5]),
            torch.tensor([1.5, 1.5]),
            not create_graph,
            create_graph)

        # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
        # but only if create_graph=False.
        _test_grad_tensor(
            torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
            torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
            not create_graph,
            create_graph)
def test_accumulate_grad_with_zero_numel_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
    """Hessian-vector product via double backward into the .grad fields."""
    x = torch.randn(2, 2, requires_grad=True)
    y = torch.randn(2, 2, requires_grad=True)

    z = x ** 2 + y * x + y ** 2
    z.backward(torch.ones(2, 2), create_graph=True)

    with torch.no_grad():
        # Analytic first-order gradients of z w.r.t. x and y.
        x_grad = 2 * x + y
        y_grad = x + 2 * y
    self.assertEqual(x.grad, x_grad)
    self.assertEqual(y.grad, y_grad)

    # Backward through the first-order grads: the Hessian-vector product
    # accumulates on top of the already-populated .grad values.
    grad_sum = 2 * x.grad + y.grad
    grad_sum.backward(torch.ones(2, 2))
    x_hv = torch.ones(2, 2) * 5
    y_hv = torch.ones(2, 2) * 4
    self.assertEqual(x.grad, x_grad + x_hv)
    self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.assertFail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
    # This checks an edge case for register_hook.
    # We want to capture grad of a nonleaf tensor,
    # but avoid segfault during backward of other nonleaf tensors
    x = torch.randn(5, requires_grad=True)
    x_list = x.unbind()

    x0 = x_list[0]
    hook_results = [None]

    def hook(grad):
        hook_results[0] = grad
    x0.register_hook(hook)

    x_list[0].backward()
    self.assertEqual(hook_results[0], torch.tensor(1.))
    expected_grad = torch.tensor([1., 0, 0, 0, 0])
    self.assertEqual(x.grad, expected_grad)
    # Non-leaf views do not accumulate a .grad of their own.
    self.assertIsNone(x_list[0].grad)

    for i in range(1, 5, 1):
        x_list[i].backward()
        # NOTE(review): hook_results[0] was set to tensor(1.) by the first
        # backward above and nothing visibly resets it, yet this asserts
        # it equals None — looks like a reset (hook_results[0] = None) is
        # expected before/inside this loop; confirm against upstream.
        self.assertEqual(hook_results[0], None)
        expected_grad[i] = 1.0
        self.assertEqual(x.grad, expected_grad)
        self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
# Create a hook that do not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
    # Test that certain nodes are not erroneously executed when an input
    # is unreachable. See #39784
    """When the requested inputs are unreachable from the outputs, the
    output's backward node must never run at all."""
    class MyFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            # Executing this node at all is the failure condition.
            self.fail("This node should not be executed!")

    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
    self.assertIsNone(gY)

    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    z = torch.randn(1, requires_grad=True)
    (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
    self.assertIsNone(gY)
    self.assertIsNotNone(gZ)

    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    torch.autograd.backward(x, inputs=(y, ))  # allow_unused is implicitly True!
    self.assertIsNone(y.grad)
def test_grad_batched_grad(self):
    """grad(..., is_grads_batched=True) vmaps over a leading batch dim of
    grad_outputs, and rejects grad_outputs whose trailing shape mismatches
    the output."""
    x = torch.randn(2, 2, requires_grad=True)

    out = x.clone()  # Size([2, 2])
    batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2)  # Size([3, 2, 2])
    grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
    self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))

    # Detect shape mismatch
    grad_out = torch.ones(2, 2)
    with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
        torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)

    # Scalar outputs
    out = x.sum()  # Size([])
    batched_grad = torch.arange(3)  # Size([3])
    grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
    self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))

    # We consider scalar and sized-1 to be a mismatch. This is consistent with current non-batched behavior.
    grad_out = torch.ones(2).unsqueeze(1)
    with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
        torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
def test_hooks(self):
    """Tensor.register_hook: multiple hooks, handle removal, and hooks
    that rewrite the flowing gradient."""
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    y.requires_grad_(True)

    counter = [0]

    def bw_hook(inc, grad):
        self.assertIsInstance(grad, torch.Tensor)
        counter[0] += inc

    z = x ** 2 + x * 2 + x * y + y
    # Hook on x contributes 0, hook on z contributes 1 per backward.
    x.register_hook(lambda *args: bw_hook(0, *args))
    test = z.register_hook(lambda *args: bw_hook(1, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 1)

    # A second hook on z adds 2 more per backward (1 + 2 + previous 1 = 4).
    test2 = z.register_hook(lambda *args: bw_hook(2, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 4)

    # After removing the +2 hook, only +1 remains (4 + 1 = 5).
    test2.remove()
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 5)

    def bw_hook_modify(grad):
        return grad.mul(2)

    # A hook that returns a tensor replaces the gradient flowing down.
    test.remove()
    z.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(y.grad, (x + 1) * 2)

    # Stacking a modifying hook on y doubles the gradient once more.
    y.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5))
    self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
    # Tests hooks for autograd function implemented in C++
    """Hooks must fire (and may rewrite grads) behind C++-implemented ops."""
    bn = torch.nn.BatchNorm1d(5, affine=False)
    bn.double()
    bn.eval()  # eval mode: output depends only on the input and stats

    counter = [0]

    def bw_hook(grad):
        counter[0] += 1
        return grad * 2

    x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
    z = bn(x)
    z.register_hook(bw_hook)
    z.sum().backward()

    self.assertEqual(counter[0], 1, msg='bw_hook not called')
    # The hook doubled the gradient before it reached x.
    self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
    """retain_grad() must not create a reference cycle that keeps the
    tensor alive past its scope (uses the private _WeakTensorRef probe)."""
    x = torch.ones(5, 5, requires_grad=True)

    def run_test():
        y = x * 2
        y.retain_grad()

        return y / 2, torch._C._WeakTensorRef(y)

    z, ref = run_test()
    # `y` must have been collected when run_test returned...
    self.assertTrue(ref.expired())
    # ...and backward through the surviving graph must still work.
    z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
    """mm/addmm backward with sparse operands: the supported combination
    matches the all-dense result; unsupported combinations must raise."""
    size = (3, 3)

    sparse = torch.sparse_coo_tensor(size, requires_grad=True)
    dense = torch.randn(size, requires_grad=True)

    with self.assertRaisesRegex(
            RuntimeError,
            "The backward pass for this operation requires the 'mat1' tensor to be strided,"):
        z = dense.addmm(sparse, dense)

    mm_test_cases = [
        # a requires grad, a is sparse, b requires grad, b is sparse, error message
        (False, True, True, False, None),
        (False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, True, False, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
    ]
    for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
        # We should only be testing cases with sparse inputs, and at least one
        # input needs to require grad so we can call a backward pass
        assert a_is_sparse or b_is_sparse
        assert a_req_grad or b_req_grad

        a = torch.randn(size, requires_grad=a_req_grad)
        if a_is_sparse:
            a = a.to_sparse()
        b = torch.randn(size, requires_grad=b_req_grad)
        if b_is_sparse:
            b = b.to_sparse()

        # If no error expected, check that sparse and dense cases match
        if err_msg is None:
            r = a.mm(b)
            r.sum().backward()
            a_grad = None if a.grad is None else a.grad.clone().detach()
            b_grad = None if b.grad is None else b.grad.clone().detach()

            # Redo with only dense tensors
            a = (a.to_dense() if a.is_sparse else a).clone().detach()
            a.requires_grad = a_req_grad
            b = (b.to_dense() if b.is_sparse else b).clone().detach()
            b.requires_grad = b_req_grad
            r = a.mm(b)
            r.sum().backward()

            # Gradients from the sparse run must equal the dense run's.
            self.assertEqual(a_grad, a.grad)
            self.assertEqual(b_grad, b.grad)
        else:
            with self.assertRaisesRegex(RuntimeError, err_msg):
                a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
    """backward(..., inputs=...) accumulates grads only into `inputs`;
    a single tensor is accepted and an empty list is rejected."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    def fn():
        return x ** 2 + y * x + y ** 2

    gradient = torch.ones(2, 2)
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y

    @torch.no_grad()
    def reset_grad():
        x.grad.zero_()
        y.grad.zero_()

    torch.autograd.backward(fn(), gradient, inputs=[x, y])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, y_grad_expected)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[x])
    self.assertEqual(x.grad, x_grad_expected)
    # y was not listed in `inputs`, so its zeroed grad is untouched.
    self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[y])
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    # A bare tensor (not wrapped in a list) is accepted as `inputs` too.
    torch.autograd.backward(fn(), gradient, inputs=y)
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    self.assertRaisesRegex(RuntimeError, 'cannot be empty',
                           lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
    """backward(inputs=...) also accumulates into requested non-leaf tensors."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    x_nonleaf = x * 1
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2

    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y
    x_non_leaf_expected = 2 * x_nonleaf + y

    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)

    # backward doesn't have an allow_unused flag, so the behavior of backward
    # when a variable is not part of the graph is as if allow_unused were True:
    # z.grad will simply be None.
    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
    self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
    """save_for_backward must preserve the saved tensor's output_nr."""
    x = torch.randn(10, requires_grad=True)

    class MultiOutputFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:5], x[5:]

        @staticmethod
        def backward(ctx, *grad):
            return torch.cat(grad)

    a, b = MultiOutputFn.apply(x)
    # `b` is the second output of its producing Function.
    self.assertEqual(b.output_nr, 1)

    class TestFn(Function):
        @staticmethod
        def forward(ctx, b):
            ctx.save_for_backward(b)
            return b * 2

        @staticmethod
        def backward(ctx, grad_b):
            b, = ctx.saved_tensors
            # output_nr must survive the save/unpack round trip.
            self.assertEqual(b.output_nr, 1)

    TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
    """Deallocating a very deep autograd graph must not overflow the stack."""
    def scope():
        depth = 150000
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # build a "chain" computation graph
        for _ in range(depth):
            y = y + y * 0.000001

        # graph deletion occurs when the above locals go out of scope.
        # In this case `del y` will trigger it but it's easier to leave
        # it to Python to delete the locals.

    # Should not stack overflow
    scope()
def test_free_deep_graph_complicated(self):
    """Deallocating a deep graph with (intended) skip connections must
    not overflow the stack."""
    def scope():
        depth = 100000
        randchoice = torch.randint(2, [depth, 2])
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # Hold the two previous values
        prev_values = [None, None]

        # Build a "chain with skip connections" graph
        for _ in range(depth):
            # NOTE(review): prev_values[:-1] yields at most one tensor, so
            # `nprev == 2` below can never hold and the skip-connection
            # branch is dead; if it ever ran, randchoice[depth] would also
            # be out of bounds (valid indices are 0..depth-1). Confirm the
            # intent against upstream before changing.
            prev_tensors = [tensor for tensor in prev_values[:-1]
                            if tensor is not None]
            prev_values.append(y)
            prev_values.pop(0)

            # Definitely pick one tensor to add
            y += y * 0.000001

            # Possibly add other tensors
            nprev = len(prev_tensors)
            if nprev == 2:
                y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    scope()
def test_free_deep_graph_pyfunction(self):
    """Deallocating a deep graph of Python Functions must not overflow the stack."""
    class MyOp(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output

    def scope():
        depth = 150000
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # build deeply nested computation graph
        for _ in range(depth):
            y = MyOp.apply(y, y)

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
    # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
    # recursively, to ensure that the decorators preserve the caller's setting
    @torch.enable_grad()
    def enable_grad_decorator_recursive(depth):
        self.assertTrue(torch.is_grad_enabled())
        if depth > 0:
            no_grad_decorator_recursive(depth - 1)
            # grad mode must be restored after the nested call returns
            self.assertTrue(torch.is_grad_enabled())

    @torch.no_grad()
    def no_grad_decorator_recursive(depth):
        self.assertFalse(torch.is_grad_enabled())
        if depth > 0:
            enable_grad_decorator_recursive(depth - 1)
            self.assertFalse(torch.is_grad_enabled())

    # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
    # each other recursively, to ensure that the decorators preserve the caller's setting
    def enable_grad_context_manager_recursive(depth):
        with torch.enable_grad():
            self.assertTrue(torch.is_grad_enabled())
            if depth > 0:
                no_grad_context_manager_recursive(depth - 1)
                self.assertTrue(torch.is_grad_enabled())

    def no_grad_context_manager_recursive(depth):
        with torch.no_grad():
            self.assertFalse(torch.is_grad_enabled())
            if depth > 0:
                enable_grad_context_manager_recursive(depth - 1)
                self.assertFalse(torch.is_grad_enabled())

    # Run both recursion flavors starting from each outer grad mode.
    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertTrue(torch.is_grad_enabled())

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
    """Grad-mode decorators hold inside coroutines regardless of the grad
    mode active at each send(), and the caller's mode is restored around
    every suspension."""
    @torch.no_grad()
    def coro_no_grad(n=10):
        self.assertFalse(torch.is_grad_enabled())
        for i in range(n):
            self.assertFalse(torch.is_grad_enabled())
            r = yield i
            self.assertFalse(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertFalse(torch.is_grad_enabled())

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        self.assertTrue(torch.is_grad_enabled())
        for i in range(n):
            self.assertTrue(torch.is_grad_enabled())
            r = yield i
            self.assertTrue(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertTrue(torch.is_grad_enabled())

    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        coro, r = coro_no_grad(), None
        try:
            while True:
                self.assertTrue(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertTrue(torch.is_grad_enabled())

        except StopIteration:
            pass

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        coro, r = coro_enable_grad(), None
        try:
            while True:
                self.assertFalse(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertFalse(torch.is_grad_enabled())

        except StopIteration:
            pass
def test_set_grad_coroutines_benign_exceptions(self):
    """Exceptions thrown into (and recovered by) a coroutine must not
    disturb the grad mode set by its decorator."""
    class RecoverableException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                # yield negative values once an exception has been seen
                yield (-i if has_raised else i)

            except RecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                has_raised = True

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)

            except RecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                has_raised = True

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)

        except StopIteration:
            pass

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)

        except StopIteration:
            pass
def test_set_grad_coroutines_critical_exceptions(self):
    """Exceptions that terminate a coroutine still see the decorator's
    grad mode inside, and propagate (re-raised) to the caller."""
    class UnrecoverableException(Exception):
        pass

    class SecondaryException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield (-i if has_raised else i)

            except UnrecoverableException:
                # grad mode must still be the decorator's while handling
                self.assertFalse(torch.is_grad_enabled())
                raise SecondaryException

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)

            except UnrecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                raise SecondaryException

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
    """GeneratorExit delivered by close() must be handled under the
    coroutine's decorated grad mode."""
    @torch.no_grad()
    def coro_no_grad(state):
        for i in range(10):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield i

            except GeneratorExit:
                self.assertFalse(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    @torch.enable_grad()
    def coro_enable_grad(state):
        for i in range(10):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield i

            except GeneratorExit:
                self.assertTrue(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    state = set()
    with torch.enable_grad():
        coro = coro_no_grad(state)
        # advance partway, then close to deliver GeneratorExit
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)

    state = set()
    with torch.no_grad():
        coro = coro_enable_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
    """Basic and advanced indexing must backprop correctly: the grad of
    sum(y[idx]) is 1 at the selected positions and 0 elsewhere."""
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    def compare(x, y, idx, indexed_tensor, indexed_var):
        # check forward values agree between plain tensor and Variable
        indexed_var_t = indexed_var.data
        if not isinstance(indexed_tensor, torch.Tensor):
            indexed_var_t = indexed_var_t[0]
        self.assertEqual(indexed_tensor, indexed_var_t)
        indexed_var.sum().backward()
        expected_grad = torch.empty(x.size()).fill_(0)
        expected_grad[idx] = 1
        self.assertEqual(y.grad, expected_grad)
    def check_index(x, y, idx):
        # zero out grad accumulated by a previous check before comparing
        if y.grad is not None:
            with torch.no_grad():
                y.grad.zero_()
        indexed_tensor = x[idx]
        indexed_var = y[idx]
        compare(x, y, idx, indexed_tensor, indexed_var)
    check_index(x, y, 1)
    check_index(x, y, (1, 1))
    check_index(x, y, slice(1, None))
    check_index(x, y, slice(None, 2))
    check_index(x, y, (slice(None, 2), 2))
    check_index(x, y, (slice(1, 2), 2))
    check_index(x, y, (1, slice(2, None)))
    check_index(x, y, (slice(None, None), slice(2, None)))
    check_index(x, y, torch.LongTensor([0, 2]))
    check_index(x, y, torch.rand(4, 4).bernoulli().bool())
    check_index(x, y, (Ellipsis, slice(2, None)))
    check_index(x, y, ([0], [0]))
    check_index(x, y, ([1, 2, 3], [0]))
    check_index(x, y, ([1, 2], [2, 1]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([slice(None), [2, 3]]))
    check_index(x, y, ([[2, 3], slice(None)]))
    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0]))
    check_index(x, y, ([0], ))
    x = torch.arange(1., 49).view(4, 3, 4)
    y = Variable(x, requires_grad=True)
    check_index(x, y, (slice(None), [0], [0]))
    check_index(x, y, ([0], [0], slice(None)))
    check_index(x, y, (slice(None), [0, 1, 2], [0]))
    check_index(x, y, ([0, 1, 2], [0], slice(None)))
    check_index(x, y, (slice(None), [1, 2], [2, 1]))
    check_index(x, y, ([1, 2], [2, 1], slice(None)))
    check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
    check_index(x, y, (slice(None), slice(None), [2, 1]))
    check_index(x, y, (slice(None), [2, 1], slice(None)))
    check_index(x, y, ([2, 1], slice(None), slice(None)))
    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0], ))
    check_index(x, y, ([0], slice(None)))
    check_index(x, y, ([0], Ellipsis))
    check_index(x, y, ([1, 2], [0, 1]))
    check_index(x, y, ([1, 2], [0, 1], Ellipsis))
    check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
    # advanced indexing, with a tensor wrapped in a variable
    z = torch.LongTensor([0, 1])
    zv = Variable(z, requires_grad=False)
    seq = [z, Ellipsis]
    seqv = [zv, Ellipsis]
    if y.grad is not None:
        with torch.no_grad():
            y.grad.zero_()
    indexed_tensor = x[seq]
    indexed_var = y[seqv]
    compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
    """Duplicate indices must ACCUMULATE gradient, one unit per
    occurrence, for both basic and advanced indexing."""
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx:
        expected_grad[i] += 1
    self.assertEqual(y.grad, expected_grad)
    # with advanced indexing
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[1, 1, 3, 2, 1, 2], [0]]
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx[0]:
        for j in idx[1]:
            expected_grad[i][j] += 1
    self.assertEqual(y.grad, expected_grad)
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
    y[idx].sum().backward()
    # hand-computed accumulation for the 2x2 advanced index above
    expected_grad = torch.tensor([[0., 2., 0., 0.],
                                  [1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 0., 0.]])
    self.assertEqual(y.grad, expected_grad)
    x = torch.arange(1., 65).view(4, 4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[1, 1, 1], slice(None), slice(None)]
    y[idx].sum().backward()
    expected_grad = torch.empty(4, 4, 4).zero_()
    # row 1 selected three times -> grad 3 everywhere in that slice
    expected_grad[1].fill_(3)
    self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
    """Reading the removed .volatile attribute warns and returns False."""
    v = torch.autograd.torch.randn(3, 3)
    with warnings.catch_warnings(record=True) as w:
        self.assertFalse(v.volatile)
    # the captured warning must mention the deprecated attribute
    self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
    """requires_grad propagates through ops, and backward must not
    traverse branches whose inputs do not require grad."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)
    z = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertFalse(a.requires_grad)
    b = a + z
    self.assertTrue(b.requires_grad)
    def error():
        raise RuntimeError
    # Make sure backward isn't called on these: hooks that raise prove
    # the engine never visits the no-grad branch.
    a._backward_hooks = OrderedDict()
    x._backward_hooks = OrderedDict()
    y._backward_hooks = OrderedDict()
    a._backward_hooks['test'] = error
    x._backward_hooks['test'] = error
    y._backward_hooks['test'] = error
    b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
    """In-place ops are fine while requires_grad is False; once the flag
    is set, in-place ops on the tensor or its views must raise."""
    # basic case, should be able to modify inplace while requires_grad is False
    a = torch.randn(2, 3)
    a.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))
    # same but with a view
    a = torch.randn(2, 3)
    b = a[:]
    b.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))
    # should fail if requires_grad = True when we modify inplace
    a = torch.randn(2, 3)
    b = a[:]
    a.requires_grad = True
    with self.assertRaises(RuntimeError):
        a.add_(5)
    # modifying through a view of a leaf is equally forbidden
    with self.assertRaises(RuntimeError):
        b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
    """backward(create_graph=True) should warn; autograd.grad with
    create_graph=True should not."""
    try:
        # force warn-once warnings to fire every time for this test
        prev = torch.is_warn_always_enabled()
        torch.set_warn_always(True)
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = b * b
        with warnings.catch_warnings(record=True) as ws:
            c.backward(torch.ones_like(c), create_graph=True)
        b.grad = None
        self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
        # Should not warn for grad
        with warnings.catch_warnings(record=True) as ws:
            torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
        self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
    finally:
        # restore global warn-always state even if assertions fail
        torch.set_warn_always(prev)
def test_next_functions(self):
    """grad_fn.next_functions exposes (node, input_nr) edges:
    AccumulateGrad for leaves, None for non-differentiable inputs."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertIsNotNone(a.grad_fn)
    next_functions = a.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[0][1], 0)
    self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[1][1], 0)
    b = a + 5
    next_functions = b.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIs(next_functions[0][0], a.grad_fn)
    # the constant 5 contributes no grad edge
    self.assertIs(next_functions[1][0], None)
def test_inplace(self):
    """In-place ops bump the version counter; backward through a node
    that SAVED the mutated tensor must raise, others must succeed."""
    x = torch.ones(5, 5, requires_grad=True)
    y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
    z = x * y
    q = z + y
    w = z * y
    z.add_(2)
    # Add doesn't need its inputs to do backward, so it shouldn't raise
    q.backward(torch.ones(5, 5), retain_graph=True)
    # Mul saves both inputs in forward, so it should raise
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
    z = x * y
    q = z * y
    r = z + y
    w = z.add_(y)
    # w is the last expression, so this should succeed
    w.backward(torch.ones(5, 5), retain_graph=True)
    # r doesn't use the modified value in backward, so it should succeed
    r.backward(torch.ones(5, 5), retain_graph=True)
    # q uses dirty z, so it should raise
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
    with torch.no_grad():
        x.grad.zero_()
    m = x / 2
    z = m + y / 8
    q = z * y
    r = z + y
    prev_version = z._version
    w = z.exp_()
    self.assertNotEqual(z._version, prev_version)
    r.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.ones(5, 5) / 2)
    w.backward(torch.ones(5, 5), retain_graph=True)
    # grad accumulates: 1/2 from r plus e/2 from w (d exp(z)/dx = e/2 at x=1)
    self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
    leaf = torch.ones(5, 5, requires_grad=True)
    x = leaf.clone()
    x.add_(10)
    self.assertEqual(x, torch.ones(5, 5) * 11)
    # x should be still usable
    y = x + 2
    y.backward(torch.ones(5, 5))
    self.assertEqual(leaf.grad, torch.ones(5, 5))
    z = x * y
    x.add_(2)
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
    """With one differentiable and one non-differentiable output, only
    the marked one loses requires_grad; its incoming grad is zeros."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            a = input + 1
            b = input + 2
            ctx.mark_non_differentiable(a)
            return a, b
        @staticmethod
        def backward(ctx, grad_a, grad_b):
            # the engine feeds zeros for the non-differentiable output
            self.assertTrue((grad_a == 0).all())
            self.assertTrue((grad_b == 1).all())
            return grad_b
    x = torch.randn(5, 5, requires_grad=True)
    a, b = MyFunction.apply(x)
    self.assertFalse(a.requires_grad)
    self.assertTrue(b.requires_grad)
    b.sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
    """Returning None from backward for a non-differentiable output must
    not crash the C++ consumers upstream."""
    # This used to segfault because MyFunction would send back null
    # gradients to MulBackward, which is implemented in C++. C++
    # implemented functions expect incoming grad_outputs to be non-null.
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input.clone()
            ctx.mark_non_differentiable(output)
            return output
        @staticmethod
        def backward(ctx, grad_output):
            return None
    x = torch.randn(5, 5, requires_grad=True)
    r = MyFunction.apply(x * x)
    (r * x).sum().backward()
def test_return_duplicate(self):
    """A Function returning the same tensor twice yields aliased
    outputs, and gradients through both aliases check out numerically."""
    class DoubleDuplicate(Function):
        @staticmethod
        def forward(ctx, x):
            output = x * 2
            return output, output
        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2
    def fn(x):
        a, b = DoubleDuplicate.apply(x)
        # both returned outputs must be the very same tensor object
        self.assertIs(a, b)
        return a + b
    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(fn, [x])
    gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
    """Same duplicate-output check as above, but the Function mutates
    its input in place and marks it dirty."""
    class DoubleInplace(Function):
        @staticmethod
        def forward(ctx, x):
            x.mul_(2)
            ctx.mark_dirty(x)
            return x, x
        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2
    def inplace_fn(x):
        a, b = DoubleInplace.apply(x.clone())
        self.assertIs(a, b)
        return a + b
    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(inplace_fn, [x])
    gradgradcheck(inplace_fn, [x])
    # Can't modify leaf variables in-place
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
    # Functions which modify views in-place must return only one output
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
def _test_setitem(self, size, index):
    """Helper: after `y[index] = const`, grad of sum(y) w.r.t. x is 0 at
    the assigned positions and 1 elsewhere."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    y[index] = 2
    # setitem is an in-place op and must bump the version counter
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    expected_grad = torch.ones(*size)
    expected_grad[index] = 0
    self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
    """Helper: `y[index] = value` routes grad to BOTH the overwritten
    target (zeroed at index) and the assigned value (all ones)."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    value = x.new(x[index].size()).fill_(7)
    value.requires_grad = True
    y[index] = value
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    expected_grad_input = torch.ones(*size)
    expected_grad_input[index] = 0
    self.assertEqual(x.grad, expected_grad_input)
    self.assertEqual(value.grad, torch.ones_like(value))
    # case where x broadcasts into y[1]
    x = torch.randn(4, requires_grad=True)
    y = torch.zeros(2, 3, 4)
    y[1] = x
    y.backward(torch.randn(2, 3, 4))
    # broadcast-assign must reduce the grad back to x's shape
    self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
    """Drive the setitem helpers over basic, advanced, and mixed index
    forms, with both constant and tensor right-hand sides."""
    self._test_setitem((5, 5), 1)
    self._test_setitem((5,), 1)
    self._test_setitem((1,), 0)
    self._test_setitem((10,), [[0, 4, 2]])
    self._test_setitem((5, 5), [[0, 4], [2, 2]])
    self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5), 3)
    self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
    self._test_setitem_tensor((5,), 3)
    self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
    self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
                              3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
    """Drive the setitem helpers with boolean-mask indices."""
    mask = torch.BoolTensor(5, 5).bernoulli_()
    self._test_setitem((5, 5), Variable(mask))
    self._test_setitem((5,), Variable(mask[0]))
    self._test_setitem((1,), Variable(mask[0, 0:1]))
    self._test_setitem_tensor((5, 5), Variable(mask))
    self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
def test_unused_output(self):
    """Backprop through one chunk of a multi-output op: unused chunks
    must contribute zero gradient."""
    x = torch.randn(10, 10, requires_grad=True)
    outputs = x.chunk(5)
    o = outputs[2]
    o = o * 4 + 2
    o.sum().backward()
    expected_grad = torch.zeros(10, 10)
    # chunk 2 covers rows 4..5 of the 10-row input
    expected_grad[4:6] = 4
    self.assertEqual(x.grad, expected_grad)
    with torch.no_grad():
        x.grad.zero_()
    grad_output = torch.randn(2, 10)
    outputs = x.chunk(5)
    outputs[0].backward(grad_output)
    expected_grad = torch.zeros(10, 10)
    expected_grad[:2] = grad_output
    self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
    """Helper: gather with sparse_grad=True must yield a gradient equal
    (after to_dense) to the dense-gradient result."""
    x = torch.randn(size_x, requires_grad=True)
    if len(size_ind) > 0 and len(size_x) > 0:
        ind = torch.randint(x.size(dim), size_ind)
    else:
        # scalar case: randint can't size from an empty shape
        ind = torch.zeros(size_ind, dtype=torch.int64)
    out = torch.gather(x, dim, ind, sparse_grad=False)
    grad = torch.rand_like(out)
    out.backward(grad)
    grad_dense = x.grad.clone()
    x.grad = None
    out = torch.gather(x, dim, ind, sparse_grad=True)
    out.backward(grad)
    self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
    """Sparse-grad gather along dim 0."""
    self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
    """Sparse-grad gather along dim 1."""
    self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
    """Sparse-grad gather along a negative dim."""
    self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
    """Sparse-grad gather with a scalar index tensor."""
    self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
    """Sparse-grad gather from a scalar input."""
    self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
    """Sparse-grad gather with scalar input and scalar index."""
    self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
    """Instantiating a Function directly warns; grad_fn-only accessors
    on such an instance must raise cleanly rather than segfault."""
    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            return x
        @staticmethod
        def backward(ctx, grad_x):
            return grad_x
    with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
        f = Id()
    # After raising warning, should still return an instance
    self.assertIsInstance(f, Id)
    x = torch.zeros(1, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
        f(x)
    t = Id.apply(x)
    self.assertEqual(t.grad_fn.name(), "IdBackward")
    # THPFunction is the base class of both grad_fn and autograd functions,
    # which means that a lot of accessors on them may segfault. Test that we
    # properly error in this case.
    t = torch.ones(1, requires_grad=True)
    t._backward_hooks = dict()
    with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
        f._register_hook_dict(t)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
        f.register_hook(lambda x, y: None)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
        f.next_functions
    with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
        f.name()
    with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
        f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
    """Accessing grad_fn.metadata after the output tensor is deleted
    should work, but currently raises — hence expectedFailure."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x
        @staticmethod
        def backward(ctx, g):
            return g
    x = torch.zeros(1, requires_grad=True)
    y = MyFunction.apply(x)
    y.backward()
    y.grad_fn.metadata
    g = y.grad_fn
    del y
    g.metadata  # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
    """A ctx stashed past backward must refuse access to saved_tensors
    once the graph is freed."""
    saved_ctx = []
    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x
        @staticmethod
        def backward(ctx, grad_x):
            # leak the ctx out of the backward call on purpose
            saved_ctx.append(ctx)
            return ctx.saved_tensors
    p = torch.zeros(1, requires_grad=True)
    loss = Id.apply(p)
    loss.backward(retain_graph=True)
    del loss
    # At this point in time, it complains that the graph has been freed
    # (which is indeed true, although a somewhat indirect way of stating
    # the problem).
    self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
    """Repeated grad-of-grad through a custom Function must be
    deterministic across invocations (regression for PR #22983)."""
    # This test failed the equality check in PR #22983; it's an interesting
    # and different test case worth enshrining. mult1 is not testing
    # anything that interesting, but mult2 is the interesting case.
    def mult1(x):
        return x.prod(dim=-1).prod(dim=-1)
    class Mult(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = mult1(x)
            ctx.save_for_backward(x, y)
            return y
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return (grad_output * y)[:, None, None] / x
    mult2 = Mult.apply
    def check_gradgrad_repeated(x, y):
        # run the same double-backward twice; results must agree exactly
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
    x = torch.ones(2, 4, 4).requires_grad_()
    check_gradgrad_repeated(x, mult1(x))
    check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
    """Buffers saved by a custom Function must not be freed before a
    second grad call (regression for #22983)."""
    # This test failed complaining that buffers had already been freed
    # prior to #22983. Also pretty interesting test case.
    class Double(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = x ** 2
            ctx.save_for_backward(x, y)
            return y
        @staticmethod
        def backward(ctx, grad_output):
            x, _ = ctx.saved_tensors
            return grad_output * 2 * x
    # this is equivalent, but uses the output of .forward() in .backward()
    class Double2(Double):
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return grad_output * 2 * y / x
    double = Double.apply
    double2 = Double2.apply
    x = torch.tensor(2).double().requires_grad_()
    self.assertTrue(gradcheck(double, x))
    self.assertTrue(gradgradcheck(double, x))
    self.assertTrue(gradcheck(double2, x))
    self.assertTrue(gradgradcheck(double2, x))
    y = double(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)
    y = double2(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)  # should not error!
def test_detach(self):
    """detach() cuts the graph; detach_() does so in place and is
    forbidden on views."""
    x = torch.randn(10, 10, requires_grad=True)
    y = x + 2
    y = y.detach()
    z = y * 4 + 2
    self.assertFalse(y.requires_grad)
    self.assertFalse(z.requires_grad)
    x = torch.randn(10, 10, requires_grad=True)
    y = x * 2
    y = y.detach()
    self.assertFalse(y.requires_grad)
    self.assertIsNone(y.grad_fn)
    z = x + y
    z.sum().backward()
    # This is an incorrect gradient, but we assume that's what the user
    # wanted. detach() is an advanced option.
    self.assertEqual(x.grad, torch.ones(10, 10))
    # in-place detach
    x = torch.randn(10, 10, requires_grad=True)
    y = torch.randn(10, 10, requires_grad=True)
    a = x * 2
    (y + a).sum().backward(retain_graph=True)
    a.detach_()
    self.assertFalse(a.requires_grad)
    (y + a).sum().backward()  # this won't backprop to x
    self.assertEqual(x.grad, torch.ones(10, 10) * 2)
    self.assertEqual(y.grad, torch.ones(10, 10) * 2)
    # in-place detach on a view raises an exception
    view = x.narrow(0, 1, 4)
    self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def test_detach_then_inplace_raises_in_autograd(self):
x = torch.randn([], requires_grad=True)
orig_x = x.detach().clone()
y = x ** 2 # saves x
z = x.detach()
z.zero_()
with self.assertRaisesRegex(RuntimeError, "has been modified by an inplace"):
y.backward()
def test_detach_disallows_metadata_change(self):
x = torch.randn([], requires_grad=True)
detached = x.detach()
with self.assertRaisesRegex(
RuntimeError, "not allowed on a Tensor created from .data or .detach()"):
detached.resize_(3, 3)
def _test_type_conversion_backward(self, t, ):
    """Helper: grad of a dtype conversion is ones, with the INPUT's
    type/device (t maps a tensor onto the target device)."""
    fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
    fvar.double().sum().backward()
    self.assertEqual(fvar.grad, torch.ones_like(fvar))
    self.assertEqual(type(fvar.grad), type(fvar))
    dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
    dvar.float().sum().backward()
    self.assertEqual(dvar.grad, torch.ones_like(dvar))
    self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
    """type()/type_as()/cuda()/cpu() conversions: result types, device
    placement, zero-copy no-op conversions, and backward through them."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.int(), torch.IntTensor)
    if torch.cuda.is_available():
        self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
        self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
        self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
        if torch.cuda.device_count() >= 2:
            x2 = x.float().cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            x2 = x.float().cuda()
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 0)
            x2 = x2.cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            y = Variable(torch.randn(5).cuda(1), requires_grad=True)
            y.cpu().sum().backward()
            # grad lands on the tensor's own device, not the default one
            self.assertIs(y.grad.get_device(), 1)
            self.assertIs(y.long().get_device(), 1)
    for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
        for y_var in (True, False):
            y = torch.randint(5, (5, 5), dtype=t.dtype)
            y = Variable(y) if y_var else y
            self.assertIsInstance(x.type(t), t)
            self.assertIsInstance(x.type_as(y), t)
            # TODO: t.dtype should work
            t_dtype = t().dtype
            self.assertIsInstance(x.type(t_dtype), t)
            self.assertIs(t_dtype, x.type(t_dtype).dtype)
            # same-type conversion must be a no-op sharing storage
            self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
            if torch.cuda.is_available():
                for x_cuda in (True, False):
                    for y_cuda in (True, False):
                        x_c = x.cuda() if x_cuda else x
                        y_c = y.cuda() if y_cuda else y
                        _, y_type = y_c.type().rsplit('.', 1)
                        y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
                        self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                        self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                        self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
    self._test_type_conversion_backward(lambda x: x)
    if torch.cuda.is_available():
        self._test_type_conversion_backward(lambda x: x.cuda())
        if torch.cuda.device_count() >= 2:
            # one of these has to be the non-default device
            self._test_type_conversion_backward(lambda x: x.cuda(0))
            self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
    """When a Function returns one of its inputs unchanged, hooks on the
    returned alias see only grad from its own uses, not the input's."""
    class Identity(Function):
        @staticmethod
        def forward(ctx, a, b):
            return a, a + b
        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a + grad_b, grad_b
    hook_called = [False]
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Identity.apply(x, y)
    # Make sure hooks only receive grad from usage of q, not x.
    def hook(grad):
        hook_called[0] = True
        self.assertEqual(grad, torch.ones(5, 5))
    q.register_hook(hook)
    (q + p + x).sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5) * 3)
    self.assertEqual(y.grad, torch.ones(5, 5))
    self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
    """Returning an in-place-modified (dirty) input re-binds it to the
    Function's grad_fn and makes it require grad."""
    class Inplace(InplaceFunction):
        @staticmethod
        def forward(ctx, a, b):
            ctx.mark_dirty(a)
            return a.add_(b), b + 2
        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a, grad_a + grad_b
    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Inplace.apply(x, y)
    # the returned q must be the SAME object as the mutated input
    self.assertIs(q, x)
    self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
    self.assertTrue(q.requires_grad)
    q.sum().backward()
    self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
    def test_backward_copy(self):
        """Regression test for incorrect gradient buffering in the backward engine."""
        # This test checks the backward engine for a very subtle bug that appeared
        # in one of the initial versions of autograd. Gradients tensors were
        # simply stored in lists while the function waited for all its gradients
        # to be computed. However, sometimes an output was used multiple times,
        # so the gradients needed to be summed. Engine used to keep a need_copy
        # set of tensors that will need a clone upon next addition and removed
        # them from the set as soon as the clone was performed. However, this
        # could lead to incorrect results if the same gradient tensor was
        # buffered in three places in the graph:
        # 1. When accumulating gradients in one of these places it was cloned
        #    and removed from need_copy set.
        # 2. When accumulating in second place, it wasn't in the need_copy set,
        #    so the gradients were simply accumulated in-place (which already
        #    modified the grad in 3rd place)
        # 3. When accumulating in the third place, it wasn't in the need_copy set
        #    as well, so the incoming gradient was summed in-place, yielding
        #    incorrect results in all functions, except the first one.
        x = torch.ones(5, 5, requires_grad=True)
        y = torch.ones(5, 5, requires_grad=True)
        # Simulate that we're in the middle of the graph
        a = x + 2
        b = y + 2
        c = x + 2
        # This op will just return grad_output two times in backward
        add1 = a + b
        add2 = add1 + c
        # Simulate a long branch, so grad_output will get buffered.
        for _ in range(4):
            a = a * 2
            b = b * 2
            c = c * 2
        branch = a + b + c
        out = add2 + branch
        # expected gradients are:
        # for x: 34 (16 from final a, 16 from final c, 2 from add2)
        # for y: 17 (16 from final b, 1 from add2)
        grad_output = torch.ones(5, 5)
        out.backward(grad_output)
        self.assertEqual(x.grad, torch.ones(5, 5) * 34)
        self.assertEqual(y.grad, torch.ones(5, 5) * 17)
    def test_save_none_for_backward(self):
        """save_for_backward accepts interleaved None values, and saved_tensors
        returns them as None in the same positions."""
        test_case = self
        class MyFn(Function):
            @staticmethod
            def forward(ctx, input):
                ctx.save_for_backward(None, input, None)
                return input * input
            @staticmethod
            def backward(ctx, grad_output):
                n1, input, n2 = ctx.saved_tensors
                test_case.assertIsNone(n1)
                test_case.assertIsNone(n2)
                return 2 * input * grad_output
        x = torch.randn(5, 5, requires_grad=True)
        y = MyFn.apply(x)
        y.sum().backward()
        # d/dx (x^2) = 2x
        self.assertEqual(x.grad, 2 * x)
    def test_too_many_grads(self):
        """backward may return more gradients than forward took inputs, as long
        as the extras are None."""
        class MyFn(Function):
            @staticmethod
            def forward(ctx, input):
                return input
            @staticmethod
            def backward(ctx, grad_output):
                # Two trailing None gradients for nonexistent inputs are ignored.
                return grad_output, None, None
        x = torch.randn(5, 5, requires_grad=True)
        y = MyFn.apply(x)
        y.sum().backward()
        self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
    def test_dep_nograd(self):
        """Outputs marked non-differentiable can flow into later functions
        without requiring grad, and backward still reaches the real input."""
        class F1(Function):
            @staticmethod
            def forward(ctx, input):
                out = torch.randn(input.size())
                # Second output does not participate in differentiation.
                ctx.mark_non_differentiable(out)
                return input, out
            @staticmethod
            def backward(ctx, grad_output, ignored):
                return grad_output
        class F2(Function):
            @staticmethod
            def forward(ctx, input, ignored):
                return input
            @staticmethod
            def backward(ctx, grad_output):
                return grad_output, None
        x = torch.randn(5, requires_grad=True)
        a, b = F1.apply(x)
        b = b + 1  # separate F1 from F2 by another op
        self.assertTrue(a.requires_grad)
        self.assertFalse(b.requires_grad)
        c = F2.apply(a, b)
        c.backward(torch.ones(c.size()))
        self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
    def test_simple_reentrant(self):
        """A custom Function whose backward itself calls backward (reentrant
        autograd) produces the correct gradient."""
        y_data = torch.randn(2, 2)
        class Reenter(Function):
            @staticmethod
            def forward(ctx, x):
                # Build a small internal graph under enable_grad so backward can
                # re-enter the engine on it.
                with torch.enable_grad():
                    ctx.x = Variable(x, requires_grad=True)
                    ctx.y = Variable(y_data, requires_grad=True)
                    ctx.output_var = ctx.x * ctx.y
                return ctx.output_var.detach()
            @staticmethod
            def backward(ctx, grad_output):
                # Reentrant call into the autograd engine.
                with torch.enable_grad():
                    ctx.output_var.sum().backward()
                return ctx.x.grad * grad_output
        # Reentrant starts on CPU thread, finishes on GPU thread
        x = torch.randn(2, 2, requires_grad=True)
        out = Reenter.apply(x)
        out.sum().backward()
        # d(x*y)/dx = y
        self.assertEqual(x.grad, y_data)
    def test_reentrant_child_error(self):
        """An error raised inside a reentrant (child) backward must propagate
        out of the parent backward call."""
        # Parent graph.
        a = torch.rand(3, 3, requires_grad=True)
        c = a * a
        # Reentrant child graph.
        b = torch.rand(3, 3, requires_grad=True)
        e = b * b
        f = TestAutograd.SimulateBackwardError.apply(e)
        reentrant_root = f.sum()
        class ReentrantFunc(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()
            @staticmethod
            def backward(ctx, grad):
                # Reentrant backward in child will throw an error.
                reentrant_root.backward()
                return grad
        d = ReentrantFunc.apply(c)
        with self.assertRaisesRegex(Exception, 'Simulate error'):
            d.sum().backward()
    def test_var_mean_differentiable(self):
        """torch.var_mean is differentiable and its gradients match the ones
        obtained from separate var() and mean() calls."""
        dim = [2, 4]
        keepdim = False
        input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
        input2 = deepcopy(input1)
        var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
        var2 = input2.var(dim=dim, keepdim=keepdim)
        mean2 = input2.mean(dim=dim, keepdim=keepdim)
        grad = torch.randn(3, 4, 6, 3, requires_grad=True)
        # Combine both outputs so a single backward exercises both gradients.
        r1 = var1 * var1 * mean1 * mean1
        r2 = var2 * var2 * mean2 * mean2
        self.assertEqual(r1, r2, rtol=0.01, atol=0.0)
        torch.autograd.backward(r1, grad)
        torch.autograd.backward(r2, grad)
        self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
    @skipIfNoLapack
    def test_lobpcg(self):
        """gradcheck/gradgradcheck torch.lobpcg and verify A.grad is symmetric."""
        def func(k, A, largest=True, B=None):
            # Fixed initial eigenspace approximation (identity columns) to make
            # the run deterministic for gradcheck.
            X_shape = list(A.shape)
            X_shape[-1] = k
            X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
            if A.dim() > 2:
                X = X.expand(X_shape)
            D, U = torch.lobpcg(A=A, k=k, B=B, X=X, largest=largest)
            # LOBPCG uses a random initial eigenspace approximation
            # if parameter `X` is not provided.
            # This may cause a non-deterministic behavior
            # when it comes to the sign of an eigenvector
            # (note if v is an eigenvector, so is -v),
            # hence we eliminate this non-determinism
            # by making sure that each column of U
            # gets multiplied by the sign of its max (in absolute value) element.
            # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
            # to compute the numerical gradient which can also cause the signs to flip.
            _, idx = U.abs().max(-2, keepdim=True)
            sign = U.gather(-2, idx).sign()
            U = U * sign
            return D, U
        # TODO: review if this can be ported to OpInfos or moved to test_linalg.py
        def run_symeig_test(k, sizes, largest=True):
            # Symmetric positive-ish matrix: A @ A^T, scaled down for stability.
            A = torch.rand(*sizes).double()
            A = (A @ A.mT) / 10
            A.requires_grad_(True)
            gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
            # Custom gradient vectors for better stability due to some
            # non-determinism in the lobpcg's forward.
            # Note it is not required if symeig is in forward instead (tested).
            D_grad = torch.rand(*A.shape[:-2], k) / 100
            U_grad = torch.rand(*A.shape[:-1], k) / 100
            gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
            # check whether A.grad is symmetric
            A = A.detach().requires_grad_(True)
            D, U = func(k, A, largest)
            (D.sum() + U.sum()).backward()
            self.assertEqual(A.grad, A.grad.mT)
        for largest in [True, False]:
            run_symeig_test(1, (6, 6), largest=largest)
            run_symeig_test(1, (2, 6, 6), largest=largest)
            run_symeig_test(1, (2, 2, 6, 6), largest=largest)
            run_symeig_test(2, (6, 6), largest=largest)
            run_symeig_test(2, (2, 6, 6), largest=largest)
            run_symeig_test(2, (2, 2, 6, 6), largest=largest)
            run_symeig_test(3, (9, 9), largest=largest)
            run_symeig_test(3, (2, 9, 9), largest=largest)
            run_symeig_test(3, (2, 2, 9, 9), largest=largest)
    def test_variable_traverse(self):
        """GC traversal of the autograd graph: collecting a reference cycle that
        holds an intermediary must not free tensors backward still needs."""
        def get_out_and_unrefed_cycle():
            inp = torch.randn(10, requires_grad=True)
            tmp = inp.view(10, 1)
            out = tmp.view(10)
            # Create a reference cycle that contains an
            # intermediary Variable in the graph
            my_list = []
            my_list.append(tmp)
            my_list.append(my_list)
            return out
        out = get_out_and_unrefed_cycle()
        gc.collect()
        # This will segfault if things have been erroneously released
        out.backward(torch.randn(out.size()))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
    def test_profiler(self):
        """The profiler is enabled only inside the context and records the
        expected aten ops."""
        x = torch.randn(10, 10)
        with profile(use_kineto=kineto_available()) as p:
            self.assertTrue(torch.autograd._profiler_enabled())
            y = x * 2 + 4
        self.assertFalse(torch.autograd._profiler_enabled())
        # Both the mul and the add must appear in the recorded events.
        names = ['aten::mul', 'aten::add']
        found_indices = set()
        for evt in p.function_events:
            if evt.name in names:
                found_indices.add(names.index(evt.name))
        self.assertEqual(len(found_indices), len(names))
    def test_profiler_seq_nr(self):
        """Forward ops and their backward nodes share sequence numbers; nested
        ops such as aten::empty carry no sequence number."""
        with profile(use_kineto=kineto_available()) as p:
            x = torch.randn(10, 10, requires_grad=True)
            y = torch.randn(10, 10, requires_grad=True)
            z = x + y
            s = z.sum()
            s.backward()
        print(p.key_averages().table(
            sort_by="self_cpu_time_total", row_limit=-1))
        # expecting aten::add, aten::sum to have the sequence numbers,
        # expecting the corresponding backward nodes to have the same numbers
        # as the forward ops
        autograd_ops = {
            ("aten::add", "Add"): [],
            ("aten::sum", "Sum"): [],
        }
        accumulate_ops = []
        found_empty = False
        for e in p.function_events:
            for (fwd_name, bwd_name), ops in autograd_ops.items():
                if e.name == fwd_name or (bwd_name in e.name and "Backward" in e.name):
                    ops.append(e)
            if "AccumulateGrad" in e.name:
                accumulate_ops.append(e)
            # check that nested ops (e.g. empty) don't have
            # sequence number
            if e.name == "aten::empty":
                self.assertEqual(e.sequence_nr, -1)
                found_empty = True
        for (fwd_name, bwd_name), ops in autograd_ops.items():
            # Expect exactly forward op, evaluate_function wrapper, and backward node.
            self.assertEqual(len(ops), 3)
            self.assertEqual(ops[0].name, fwd_name)
            self.assertEqual(ops[1].name, f"autograd::engine::evaluate_function: {bwd_name}Backward0")
            self.assertEqual(ops[2].name, f"{bwd_name}Backward0")
            self.assertGreaterEqual(ops[0].sequence_nr, 0)
            self.assertEqual(ops[1].sequence_nr, ops[0].sequence_nr)
            self.assertEqual(ops[2].sequence_nr, ops[0].sequence_nr)
            self.assertEqual(ops[0].fwd_thread, 0)
            self.assertEqual(ops[1].fwd_thread, ops[0].thread)
            self.assertEqual(ops[2].fwd_thread, ops[0].thread)
        self.assertTrue(found_empty)
    def test_profiler_unboxed_only(self):
        """Profiling an unboxed-only op (resize_) must not crash; the recorded
        profile is intentionally unused."""
        x = torch.rand(3, 4)
        with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
            x.resize_([3, 2])
    def test_profiler_propagation(self):
        """record_function scopes propagate across torch.jit fork/wait, even when
        the continuation runs on a different thread."""
        def foo(x):
            with record_function("in_foo") as rf:
                return x * 2
        x = torch.rand(3, 4)
        traced_foo = torch.jit.trace(foo, x)
        def bar(x):
            with record_function("in_bar") as rf:
                # we expect that profiler will be able
                # propagate across fork
                fut = torch.jit._fork(traced_foo, x)
                y = torch.jit._wait(fut)
                # note: continuation (and rf's end) can
                # be executed in a different thread
                with record_function("in_bar_after_wait") as rf2:
                    y = y * 2
                return y
        traced_bar = torch.jit.trace(bar, x)
        with profile(use_kineto=kineto_available()) as p:
            traced_bar(x)
        found_foo = False
        found_bar = False
        found_bar_after_wait = False
        for info in p.function_events:
            if info.name == "in_foo":
                # Each scope must be recorded exactly once.
                self.assertFalse(found_foo)
                found_foo = True
            elif info.name == "in_bar":
                self.assertFalse(found_bar)
                found_bar = True
            elif info.name == "in_bar_after_wait":
                self.assertFalse(found_bar_after_wait)
                found_bar_after_wait = True
        self.assertTrue(found_foo)
        self.assertTrue(found_bar)
        self.assertTrue(found_bar_after_wait)
    def test_record_function_callbacks(self):
        """A record_function scope shows up exactly once in the profile."""
        x = torch.randn(10, 10)
        with profile(use_kineto=kineto_available()) as p:
            with record_function("foo"):
                y = x * 2 + 4
        function_events = p.function_events
        foo_event = [event for event in function_events if "foo" in event.name][0]
        self.assertEqual(foo_event.count, 1)
    def test_record_function_new_signatures(self):
        """The low-level _record_function_enter_new/_record_function_exit ops
        record a scope just like the record_function context manager."""
        # Test the new _record_function ops work
        # Note: Remove once record_function uses these directly
        x = torch.randn(10, 10)
        with profile(use_kineto=kineto_available()) as p:
            record = torch.ops.profiler._record_function_enter_new("bar", None)
            try:
                y = x * 2 + 4
            finally:
                # Always close the scope, even if the profiled code raised.
                torch.ops.profiler._record_function_exit(record)
        function_events = p.function_events
        foo_event = [event for event in function_events if "bar" in event.name][0]
        self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
    def test_profiler_aggregation_table(self):
        """
        Test if the profiling result is aggregated for `str(prof)`
        See: https://github.com/pytorch/pytorch/issues/37500
        """
        x = torch.randn(1024)
        with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
            torch.einsum("i->", x)
        prof_str = str(prof)
        prof_table = prof.table()
        # str(prof) must equal the aggregated table output.
        self.assertEqual(prof_table, prof_str)
    def test_profiler_function_event_avg(self):
        """FunctionEventAvg aggregates counts/times, including when an avg is
        merged into itself (which doubles its own stats)."""
        avg = FunctionEventAvg()
        avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
        avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
        # Merging the average into itself doubles count and totals.
        avg.add(avg)
        self.assertEqual(avg.key, "foo")
        # aggregate stats
        self.assertEqual(avg.count, 4)
        self.assertEqual(avg.cpu_time_total, 30)
        self.assertEqual(avg.self_cpu_time_total, 30)
        self.assertEqual(avg.cuda_time_total, 0)
        # average stats
        self.assertEqual(avg.cpu_time, 7.5)
        self.assertEqual(avg.cuda_time_total, 0)
    def test_profiler_shapes(self):
        """With record_shapes=True, aten::linear events carry the input shapes
        of each Linear layer."""
        print("")
        layer1 = torch.nn.Linear(20, 30)
        layer2 = torch.nn.Linear(30, 40)
        input = torch.randn(128, 20)
        with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
            layer2(layer1(input))
        print(prof.function_events)
        # [input, weight, bias] shapes for each of the two linear layers.
        linear_expected_shapes = [
            [[128, 20], [30, 20], [30]],
            [[128, 30], [40, 30], [40]],
        ]
        found_indices = set()
        for event in prof.function_events:
            if event.name == "aten::linear":
                self.assertTrue(event.input_shapes in linear_expected_shapes)
                found_indices.add(linear_expected_shapes.index(event.input_shapes))
        self.assertEqual(len(found_indices), len(linear_expected_shapes))
    def test_profiler_aggregation_lstm(self):
        """Smoke-test profiler aggregation/table rendering on an LSTM workload,
        and exercise chrome-trace export (skipped on Windows)."""
        print("")
        rnn = torch.nn.LSTM(10, 20, 2)
        total_time_s = 0
        with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
            for i in range(20):
                input = torch.randn(5, 3, 10)
                h = torch.randn(2, 3, 20)
                c = torch.randn(2, 3, 20)
                start = time.time()
                rnn(input, (h, c))
                end = time.time()
                total_time_s += end - start
        print(prof.table(
            sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
        print(prof.key_averages(group_by_input_shape=True).table(
            sort_by="self_cpu_time_total", row_limit=10))
        print(prof.table(
            sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
        print(prof.key_averages(group_by_input_shape=True).table(
            sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
        total_time_us = total_time_s * 1000.0 * 1000.0  # make it us which is profiler default
        print(
            "Total time based on python measurements: ",
            _format_time(total_time_us)
        )
        print(
            "CPU time measurement python side overhead: {:.2f}%".format(
                (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
            )
        )
        # NamedTemporaryFile re-open semantics differ on Windows, so skip there.
        if sys.platform != "win32":
            with tempfile.NamedTemporaryFile() as trace_file:
                prof.export_chrome_trace(trace_file.name)
    def test_record_function(self):
        """Nested record_function scopes appear in order among the aten ops, and
        record_function also works as a decorator."""
        x = torch.randn(10, 10)
        def forward(x):
            with record_function("outer"):
                y = x * 2 + 4
                with record_function("inner"):
                    y = y - 1
            y = y / 1
        # Warm-up run outside the profiler.
        forward(x)
        with profile(use_kineto=kineto_available()) as p:
            forward(x)
        events = p.function_events
        important_events = [
            'outer',
            'aten::mul',
            'aten::add',
            'inner',
            'aten::sub',
            'aten::div'
        ]
        # Check the expected events appear as a subsequence of the event stream.
        idx = 0
        for info in events:
            if info.name == important_events[idx]:
                idx = idx + 1
            if idx == len(important_events):
                break
        self.assertEqual(idx, len(important_events))
        # We can also use record_function to decorate arbitrary function
        @record_function('my_func')
        def f(x, y):
            return x + y
        with profile(use_kineto=kineto_available()) as p:
            f(1, 2)
        self.assertTrue('my_func' in str(p))
    def test_record_function_multithreaded(self):
        """record_function scopes may be exited out of order without throwing."""
        rf = record_function("outer")
        rf.__enter__()
        with record_function("inner"):
            # test that exiting the record function after starting another one
            # doesn't throw.
            rf.__exit__(None, None, None)
        with record_function("inner"):
            rf.__enter__()
        # test that exiting the record function after ending another one
        # doesn't throw.
        rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
imag_key = 'imag'
self.assertRaises(RuntimeError, lambda: hasattr(x, imag_key))
self.assertTrue(hasattr(y, imag_key))
keys.remove(imag_key)
for key in keys:
self.assertTrue(hasattr(x, key))
    def test_inplace_on_view_saved_output(self):
        """In-place relu on a view must not create a reference cycle; the hook
        object attached to the graph is freed when the graph goes away."""
        # Test an in-place operation on a view in which the in-place op saves
        # its output. Previously, this created a reference cycle.
        dealloc = [0]
        class IncrementOnDelete(object):
            def __del__(self):
                dealloc[0] += 1
        def test():
            root = torch.randn(3, 3, requires_grad=True)
            copy = root.clone()
            copy.grad_fn.register_hook(IncrementOnDelete())
            view = copy.view(9)
            torch.nn.functional.relu(view, inplace=True)
        test()
        # The hook (and thus the graph) must have been deallocated by now.
        self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
    def test_inplace_on_view_backward(self):
        """Double backward through in-place ops on views must not raise and must
        rewrite the graph to the correct backward-of-backward node."""
        # Issue #10532: Make sure that this does not raise RuntimeError.
        net = nn.Sequential(
            nn.InstanceNorm2d(2),
            nn.ReLU(True)
        )
        x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
        g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
        torch.autograd.grad(g.sum(), [x])
        # The input leaf itself must be left unmodified.
        self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
        # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
        inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
        tmp1 = (inputs + 1).view_as(inputs)
        tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
        prob_interpolated = torch.sigmoid(tmp2)
        gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
                                        grad_outputs=torch.ones(prob_interpolated.size()),
                                        create_graph=True, retain_graph=True)[0]
        gradient_penalty = gradients.sum()
        gradient_penalty.backward()
        fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
        self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
    def test_inplace_on_view_weak_grad_fn(self):
        """Issue #23502: a view's grad_fn survives deletion of the view itself,
        so backward through a clone of it still works."""
        # Issue 23502: Test that b's grad_fn is preserved.
        a = torch.arange(10.0, requires_grad=True)
        b = a.narrow(0, 0, 2).clone().view(-1)
        b.relu_()
        c = b.clone()
        del b
        gc.collect()
        s = c.sum()
        s.backward()
        self.assertEqual(s, torch.tensor(1.0))
        # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
        a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
        with self.assertRaises(RuntimeError):
            b = a.relu_()
    def test_out_variant_raises_when_inputs_require_grad(self):
        """out= variants reject differentiable inputs/outputs outside no_grad."""
        a = torch.randn(2, 2, requires_grad=True)
        b = torch.randn(2, 2, requires_grad=True)
        x = torch.zeros_like(a)
        # out=... functions don't support automatic differentiation currently
        self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
        # the inputs can require grad if we're in no_grad() mode
        with torch.no_grad():
            torch.mul(a, b, out=x)
            self.assertEqual(x, a * b)
        a = torch.randn(2, 2)
        b = torch.randn(2, 2)
        x = torch.zeros(2, 2, requires_grad=True)
        # we should throw an exception if the output requires grad
        self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
    # TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
    def test_diagonal_derivative_requires_grad(self):
        """The gradient produced through diagonal() must itself require grad."""
        # test that the backward requires grad
        # we do this because diagonal_backward uses inplace
        # operations and gradgradcheck does not catch whether
        # they work as expected (it will succeed even if
        # the gradient has requires_grad == False)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.diagonal(a)**2
        c = b.sum()
        d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
        self.assertTrue(d.requires_grad)
    def test_anomaly_detect_nan(self):
        """detect_anomaly flags nan gradients, naming the offending backward
        output and warning when no forward trace is available."""
        size = 10
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1.sum(0, keepdim=True)
            @staticmethod
            def backward(ctx, gO):
                gI = gO.clone().expand(size)
                gI[0] = 0
                gI[0] /= 0  # Generate a nan
                # Whether the nan lands in the 0th or 1st gradient slot.
                if ctx.fail_0th:
                    return gI, None, None
                else:
                    return None, gI, None
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        out.backward()  # Should not fail
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, inp, True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    # Forward ran outside detect_anomaly, so no trace exists.
                    out.backward()
            self.assertIn('No forward pass information', str(w[0].message))
        inp = torch.rand(size, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    # Forward inside detect_anomaly records the apply() traceback.
                    out = MyFunc.apply(inp, inp, False)
                    out.backward()
            self.assertIn('MyFunc.apply', str(w[0].message))
    def test_calculate_shape_util(self):
        """_calculate_shape returns torch.Size for dense tensors and per-element
        shape tensors for nested tensors."""
        out = torch.randn(10, 5, requires_grad=True)
        grad = torch.randn(5, 10, requires_grad=True)
        out_shape, grad_shape = _calculate_shape(out, grad, False)
        assert out_shape == torch.Size([10, 5])
        assert grad_shape == torch.Size([5, 10])
        out = torch.nested_tensor([
            torch.randn(10, 5, requires_grad=True),
            torch.randn(10, 5, requires_grad=True),
            torch.randn(10, 5, requires_grad=True)]
        )
        grad = torch.nested_tensor([torch.randn(5, 10, requires_grad=True), torch.randn(5, 10, requires_grad=True)])
        out_shape, grad_shape = _calculate_shape(out, grad, False)
        # Nested tensors report one shape row per constituent tensor.
        assert torch.equal(out_shape, torch.tensor([[10, 5], [10, 5], [10, 5]]))
        assert torch.equal(grad_shape, torch.tensor([[5, 10], [5, 10]]))
    def test_nested_anomaly_detect_nan(self):
        """Anomaly detection in double backward: a nan produced by a Function
        applied inside another Function's backward is reported with both
        levels of forward trace."""
        size = 10
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, inp1, fail_0th):
                ctx.fail_0th = fail_0th
                ctx.save_for_backward(inp1)
                return inp1.sum(0, keepdim=True)
            @staticmethod
            def backward(ctx, gO):
                inp, = ctx.saved_tensors
                fail_0th = ctx.fail_0th
                g = gO.clone().expand(size)
                # Nested custom Function inside backward (double-backward graph).
                gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
                return gI, None
        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, inp1, inp2, fail_0th):
                ctx.fail_0th = fail_0th
                return inp1 * 2.0 + inp2
            @staticmethod
            def backward(ctx, gO):
                fail_0th = ctx.fail_0th
                g1 = gO.clone()
                g2 = gO.clone()
                g1[0] = 0
                g2[0] = 0
                # generate a nan
                if fail_0th:
                    g1[0] /= 0
                else:
                    g2[0] /= 0
                return g1, g2, None
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        gsum.backward()  # should not fail
        inp = torch.rand(size, requires_grad=True)
        out = MyFunc.apply(inp, True)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
        gsum = ginp.sum()
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    # Forward ran outside anomaly mode: no trace available.
                    gsum.backward()
        self.assertIn('No forward pass information', str(w[1].message))
        inp = torch.rand(size, requires_grad=True)
        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
                with detect_anomaly():
                    out = MyFunc.apply(inp, False)
                    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
                    gsum = ginp.sum()
                    gsum.backward()
        # Both the nested and the outer apply() tracebacks must be reported.
        self.assertIn('MyFunc2.apply', str(w[1].message))
        self.assertIn('MyFunc.apply', str(w[2].message))
    def test_anomaly_grad_warnings(self):
        """Anomaly-mode warnings surface as regular warnings, or on stderr when
        the warnings filter turns them into errors."""
        # PyTorch won't throw warnings if there is an error
        # but we'd want to at least see them in stderr
        class StdErrDiverter:
            # Temporarily redirect sys.stderr so we can inspect what was printed.
            def __enter__(self):
                self.stderr_orig = sys.stderr
                self.stderr_new = io.StringIO()
                sys.stderr = self.stderr_new
                return self
            def __exit__(self, *args):
                self.captured = self.stderr_new.getvalue()
                sys.stderr = self.stderr_orig
        # if the warnings don't throw, they will be handled as regular warnings
        with self.assertRaisesRegex(RuntimeError,
                                    "one of the variables needed for gradient computation has been "
                                    "modified by an inplace operation"):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    a = torch.randn(5, requires_grad=True)
                    d1 = a + 1
                    d2 = d1 ** 2
                    d1 += 1
                    torch.autograd.grad(d2.sum(), a)
        self.assertEqual(len(w), 2)
        self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
        self.assertIn('Error detected in PowBackward0', str(w[1].message))
        # if the warning throws, it will be printed to sys.stderr
        with self.assertRaisesRegex(RuntimeError,
                                    "one of the variables needed for gradient computation has been "
                                    "modified by an inplace operation"):
            with warnings.catch_warnings(record=True) as w:
                with detect_anomaly():
                    warnings.simplefilter("error")
                    with StdErrDiverter() as s:
                        a = torch.randn(5, requires_grad=True)
                        d1 = a + 1
                        d2 = d1 ** 2
                        d1 += 1
                        torch.autograd.grad(d2.sum(), a)
        self.assertEqual(len(w), 1)
        self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
        self.assertIn('Error detected in PowBackward0', s.captured)
    def test_anomaly_assign_parent_cleanup(self):
        """PyObjects created by anomaly mode's assign_parent must be released
        when the graph that references them dies."""
        # Test that python objects created are properly cleaned up when assign_parent is called
        import weakref
        def get_ref():
            # we use torch.exp here but any function that will construct a new node in its
            # backward call in grad mode will work
            x = torch.randn(2, 2, requires_grad=True)
            t = x.exp()
            # ExpBackward calls mul, creating the MulBackward node when create_graph=True.
            # In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
            # MulBackward's anomaly metadata dict, creating the following reference chain:
            #
            # grad -> MulBackward -> PyObject -> ExpBackward
            #
            with detect_anomaly():
                grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
            # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
            #
            # (PyObject) -> ExpBackward -> dict -> *Foo*
            #            t ----^        WeakRef ---^
            #
            # We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed
            # We can test this by seeing whether Foo is not kept alive once t is destroyed
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = t.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return t, ref
        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        # Dropping t must break the whole chain and free Foo.
        self.assertIsNone(ref())
    def test_nested_anomaly_printstack_cleanup(self):
        """Metadata-dict PyObjects are still destroyed even when anomaly mode's
        printstack runs during nested backward error reporting."""
        # Test if metadata dict PyObject is properly destroyed
        import weakref
        def get_ref():
            # This is similar to the construction in test_anomaly_assign_parent_cleanup:
            #
            # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
            #                               out ---^         WeakRef ---^
            #
            # We want to check that Foo is still properly destroyed even when MyFunc2Backward's
            # AnomalyMetadata calls printstack, which does some python object manipulation.
            #
            # You might be wondering why we still have test_anomaly_assign_parent_cleanup,
            # since if PyObject is not destroyed here, wouldn't this test detect that too?
            # The answer is that a custom function's PyObject (THPFunction) actually only holds
            # a weak reference to the c++ node!
            class MyFunc(Function):
                @staticmethod
                def forward(ctx, x):
                    ctx.save_for_backward(x)
                    return x
                @staticmethod
                def backward(ctx, gO):
                    x, = ctx.saved_tensors
                    return MyFunc2.apply(x)
            class MyFunc2(Function):
                @staticmethod
                def forward(ctx, x):
                    return x
                @staticmethod
                def backward(ctx, gO):
                    # Deliberately produce a nan so anomaly mode calls printstack.
                    return gO + float("NaN")
            inp = torch.rand(1, requires_grad=True)
            out = MyFunc.apply(inp)
            ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
            with warnings.catch_warnings(record=True) as w:
                with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                    with detect_anomaly():
                        ginp.backward()
            class Foo(object):
                pass
            my_obj = Foo()
            meta_dict = out.grad_fn.metadata
            meta_dict[0] = my_obj
            ref = weakref.ref(my_obj)
            return out, ref
        t, ref = get_ref()
        self.assertIsNotNone(ref())
        del t
        self.assertIsNone(ref())
    # TODO: update these tests to use the linalg module and move to test_linalg.py
    @skipIfNoLapack
    def test_eig_no_eigenvectors(self):
        """Backward through torch.eig(eigenvectors=False) must raise."""
        A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
        w, v = torch.eig(A, eigenvectors=False)
        with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
            torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
    @skipIfNoLapack
    def test_eig_complex_eigenvalues(self):
        """Backward through torch.eig must raise when eigenvalues are complex."""
        # Rotation matrix: purely imaginary eigenvalues.
        A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
        w, v = torch.eig(A, eigenvectors=True)
        with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
            torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
    @skipIfNoLapack
    def test_symeig_no_eigenvectors(self):
        """Backward through torch.symeig(eigenvectors=False) must raise."""
        A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
        w, v = torch.symeig(A, eigenvectors=False)
        with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
            torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
    def test_no_grad_copy(self):
        """The engine may hand the incoming gradient buffer to at most one
        AccumulateGrad without copying; non-contiguous grads are always copied."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                # Record the data pointer of the engine-provided grad buffer.
                MyFunc.static_grad_ptr = grad.data_ptr()
                return grad, grad
        class NonContGradFunc(Function):
            @staticmethod
            def forward(ctx, inp1):
                ctx.size = inp1.size()
                return torch.tensor([1.])
            @staticmethod
            def backward(ctx, grad):
                # Produce a non-contiguous (expanded) gradient.
                return torch.ones(1).expand(ctx.size)
        a = torch.randn(5, 6, requires_grad=True)
        b = torch.randn(5, 6, requires_grad=True)
        # non-contiguous grad should be copied
        NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
        self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
        # test case that should trigger no copy for one of a,b
        a.grad = b.grad = None
        MyFunc.apply(a, b)[1][0].backward()
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad.data_ptr()
        p_b = b.grad.data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
    def test_no_grad_copy_sparse(self):
        """Same no-copy optimization for sparse gradients: at most one of the
        accumulated grads may alias the engine's buffer, and non-contiguous
        sparse grads are always cloned."""
        # create autograd function that saves grad pointer as class static
        class MyFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                # Track the values buffer of the sparse grad.
                MyFunc.static_grad_ptr = grad._values().data_ptr()
                return grad, grad
        class NonContGradFunc(Function):
            static_grad_ptr = None
            @staticmethod
            def forward(ctx, inp1, inp2):
                return inp1 + inp2
            @staticmethod
            def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return as grad.
                v = torch.rand(1, 3)
                i = torch.ones(1, 1, dtype=torch.long)
                nv = v.expand(8, 3)
                ni = i.expand(1, 8)
                ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
                NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
                return ngrad, ngrad
        a = torch.randn(10, 3, requires_grad=True)
        b = torch.randn(10, 3, requires_grad=True)
        input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.tensor([0, 4])
        import torch.nn.functional as F
        # test case that should trigger no copy for one of a,b
        emb_matrix = MyFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = MyFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # check one of them is using the computed buffer
        self.assertTrue(p_a == p_g or p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)
        # non-contiguous indices and value, we should trigger a copy.
        a.grad = b.grad = None
        emb_matrix = NonContGradFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        loss.backward(retain_graph=True)
        p_g = NonContGradFunc.static_grad_ptr
        p_a = a.grad._values().data_ptr()
        p_b = b.grad._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # Verify we cloned both grads.
        self.assertFalse(p_a == p_g)
        self.assertFalse(p_b == p_g)
        # Run backwards multiple times to ensure accumulation works.
        for i in range(10):
            loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
    """Sparse inputs are accepted only when check_sparse_nnz=True; otherwise
    gradcheck must reject them with a clear error."""
    def check(fast_mode):
        def fn(sparse):
            return torch.sparse.sum(sparse)
        gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
                  check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
            gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
                      check_batched_grad=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.expectedFailure
def test_gradcheck_sparse_csr_input(self):
    """Documents (as an expected failure) that gradcheck cannot yet handle
    sparse CSR inputs."""
    def check(fast_mode):
        def fn(sparse_csr):
            return torch.clone(sparse_csr).to_dense()
        # Fails because gradcheck can't work with sparse csr inputs yet
        gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=True,
                  check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
            gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=False,
                      check_batched_grad=False, fast_mode=fast_mode)
    # check(fast_mode=True)  # Segmentation fault
    check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
    """A backward that is not reentrant (returns different values on repeated
    calls) must fail gradcheck unless nondet_tol is large enough to absorb
    the jitter."""
    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x
        @staticmethod
        def backward(ctx, grad_out):
            # Multiplicative random noise scaled by the jitter magnitude.
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
    def check(fast_mode):
        inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
            gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
            gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
        # With nondet_tol the same jitter levels must be tolerated.
        gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
                  fast_mode=fast_mode)
        gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
                  fast_mode=fast_mode)
        gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
    """Input validation paths of gradcheck: sparse-vs-dense mismatch, no
    differentiable inputs, low precision warning, and stride-0 dims.  Some
    of these raise even with raise_exception=False (hard validation)."""
    def check(fast_mode):
        # when inputs are not dense, but check_sparse_nnz is false
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
            gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
                      fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
                                   check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
        # when none of the inputs require grad (always raises even if raise_exception=False)
        x = torch.rand(10, requires_grad=False)
        with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
        # (warning) when inputs are not double precision
        x = torch.ones(1, dtype=torch.float32, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
            self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
        # when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. (always raises
        # even if raise_exception=False)
        x = torch.ones(1, dtype=torch.float64, requires_grad=True)
        x = x.expand((2, 2))
        with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
    """Forward-mode gradcheck must reject MKL-DNN inputs in both fast and
    slow mode."""
    # when mkldnn inputs, forward mode testing is not allowed
    # Update tolerances below to make sure the gradient match even in single precision floats
    # Use the warning assert to hide the float32 warning
    x = torch.ones(1).to_mkldnn().requires_grad_()
    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)
    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
    """Output validation: sparse and MKL-DNN outputs are unsupported and must
    raise even when raise_exception=False."""
    def check(fast_mode):
        # when sparse outputs (always raise even if raise_exception=False)
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
            gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
                      fast_mode=fast_mode)
        # when mkldnn outputs (always raise even if raise_exception=False)
        root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
        with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
            gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
    """When no output is differentiable, gradcheck requires the numerical
    gradient to be zero; a function with no outputs at all passes."""
    def check(fast_mode):
        # When none of the outputs are differentiable, but numerical gradient is not zero
        x = torch.ones((1,), requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
            # NOTE(review): fast_mode is not forwarded here, so this branch
            # always tests default (slow) mode — likely an oversight; verify
            # the fast-mode error message matches before forwarding it.
            gradcheck(lambda x: torch.tensor([x]), x)
        self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
        # succeed when no outputs at all
        self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
    """check_batched_grad=True on a sparse input: the vmap-based batched
    gradient computation fails and gradcheck must surface that failure."""
    def check(fast_mode):
        x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
        # runtime error while compute batched grad (print big error)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
            gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
                                   raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
    """gradcheck's sanity checks on the grads produced by backward: wrong
    sparse_dim/dense_dim, backward not scaling with grad_output (hooks add a
    constant to simulate this), and wrong grad layout."""
    # when grad_input is sparse and has incorrect sparse_dim/dense_dim
    def check(fast_mode):
        def fn(x):
            def hook(grad):
                if grad is not None:
                    # Corrupt the grad's sparse_dim on purpose.
                    return grad.to_dense().to_sparse(1)
                return grad
            y = x.clone()
            y.register_hook(hook)
            return y.to_dense()
        x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
            gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))
        # when backward not multiplied by grad_output (non-sparse case)
        def fn2(x):
            y = x.clone()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
        # when backward not multiplied by grad_output (sparse case)
        def fn3(x):
            y = x.clone().to_dense()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))
        # when layout of grad_input is not the same as input
        class Test(Function):
            @staticmethod
            def forward(ctx, x):
                return x
            @staticmethod
            def backward(ctx, x):
                return x.to_sparse()
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
            gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
    """The undefined-grad check feeds backward an undefined (None) grad; a
    hook that raises on None must make gradcheck report the failure."""
    def check(fast_mode):
        # when encounter runtime error while running backward
        def fn(x):
            def hook(x):
                if x is None:
                    raise RuntimeError("x is undefined")
            y = x.clone()
            y.register_hook(hook)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
            with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
                gradcheck(fn, (x,), fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
    """Analytical-vs-numerical Jacobian mismatches (introduced via hooks that
    perturb the grad) must be reported, for R->R, C->C, R->C and C->R
    functions.  NOTE(review): the complex cases hard-code fast_mode=False
    inside check() — presumably the fast-mode error text differs for complex
    outputs; confirm before unifying."""
    def check(fast_mode):
        def fn(x):  # R -> R, C -> C
            y = x.clone()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
        x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
        def fn2(x):  # R -> C
            y = torch.complex(x, x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn2, (x,), fast_mode=False)
        self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
        def fn3(x):  # C -> R
            y = torch.real(x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn3, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
    """gradcheck must accept a mix of dense and sparse differentiable inputs
    in one call (with check_sparse_nnz=True)."""
    def check(fast_mode):
        def fn(x, y):
            return x * y.coalesce().to_dense()
        a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
        b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
        self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
    """gradcheck must handle MKL-DNN inputs alongside a dense input, and a
    function where every input is MKL-DNN.  atol is loose because mkldnn
    tensors are float32 only."""
    def check(fast_mode):
        # One dense + one mkldnn input.
        def fn(x, y):
            return x + y.to_dense()
        a = torch.rand(10, requires_grad=True)
        b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
        self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
        # Two mkldnn inputs.  Fix: the original defined fn2 but then reran fn,
        # leaving fn2 dead code — run fn2 on two mkldnn tensors as intended.
        def fn2(x, y):
            return x.to_dense() + y.to_dense()
        c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
        self.assertTrue(gradcheck(fn2, (b, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
    """Finite-difference perturbation must not change the output's shape or
    dtype; functions whose output depends on input values that way must be
    rejected.  The inputs are exactly 1.0 so the eps perturbation flips the
    `x >= 1` condition."""
    def check(fast_mode):
        def fn(x):
            if torch.all(x >= 1):
                return torch.cat([x, x])
            else:
                return x
        a = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
            self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
        def fn2(x):
            if torch.all(x >= 1):
                return x.to(torch.float32)
            else:
                return x
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
            self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
    """The deprecated private get_numerical_jacobian API must still work,
    warn about deprecation, and reject grad_out != 1.0."""
    # get_numerical_jacobian is deprecated and no longer used internally by gradcheck
    from torch.autograd.gradcheck import get_numerical_jacobian
    def fn(inputs):
        # get_numerical_jacobian requires fn to take inputs as a tuple
        # and returns the jacobian wrt the first output
        x = inputs[0]
        y = inputs[1]
        return 2 * x + y, x + 2 * y
    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
    # d(2x + y)/dx is 2*I (flattened 2x2 -> 4x4).
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
    """The deprecated private get_analytical_jacobian API must still compute
    Jacobians, report non-reentrant backwards, and reject grad_out != 1.0."""
    from torch.autograd.gradcheck import get_analytical_jacobian
    def fn(x, y):
        return 2 * x + y, x + 2 * y
    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    outputs = fn(a, b)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
    self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
    self.assertTrue(reentrant)
    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x
        @staticmethod
        def backward(ctx, grad_out):
            # Random noise makes backward non-reentrant.
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
    outputs = NonDetFunc.apply(a, 1e-6)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
    self.assertFalse(reentrant)
    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
    """gradcheck failures raise GradcheckError, which subclasses RuntimeError
    (both excepts must catch it); other exceptions are never swallowed, even
    with raise_exception=False."""
    from torch.autograd.gradcheck import GradcheckError
    def check(fast_mode):
        def fn(x):
            y = x.clone()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
        def fn2(x):
            raise RuntimeError("Not a GradcheckError!")
        # Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
        with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
            gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_forward_ad(self):
    """check_forward_ad=True must catch a function whose JVP disagrees with
    the numerical Jacobian, across real and complex inputs/outputs."""
    def fn(x, y):
        return x + y, y
    def bad_fn(x, y):
        # Hacky way to check if we're currently inside a forward ad level
        is_running_forward_ad = fwAD._current_level >= 0
        if is_running_forward_ad:
            # Scale only the tangent, corrupting the JVP but not the primal.
            y_p, y_d = fwAD.unpack_dual(y)
            y = fwAD.make_dual(y_p, y_d * 1.1)
        return x + y, y
    err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
    for fast_mode in [True, False]:
        # Test for all inputs and outputs being real
        x = torch.rand(2, dtype=torch.double, requires_grad=True)
        y = torch.rand(2, dtype=torch.double, requires_grad=True)
        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        def basic_mul(x):
            return torch.view_as_real(torch.resolve_conj(x * 1j))
        gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
        # Test for one input and one output being complex
        x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        # Test for all inputs and outputs being complex
        y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_gradcheck_forward_ad_runs_with_no_requires_grad(self):
    # Currently requires_grad is used as a easy way for gradcheck to know
    # which inputs of the function are meant to be differentiable
    # This test checks that when the inputs are passed to the function they should not have
    # requires_grad=True even though they may have requires_grad=True when passed
    # to gradcheck
    class UserFn(Function):
        @staticmethod
        def forward(ctx, x, y):
            # Assert from inside the user function: inside a forward-AD
            # level, gradcheck must have stripped requires_grad.
            if fwAD._current_level >= 0:
                self.assertFalse(x.requires_grad)
                self.assertFalse(y.requires_grad)
            return x.clone(), y.clone()
        @staticmethod
        def jvp(ctx, x_t, y_t):
            return x_t, y_t
    x = torch.rand(2, dtype=torch.double, requires_grad=True)
    y = torch.rand(2, dtype=torch.double, requires_grad=True)
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=False)
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=False)
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=True)
    # Mixed requires_grad: same invariant must hold.
    x = torch.rand(2, dtype=torch.double, requires_grad=True)
    y = torch.rand(2, dtype=torch.double, requires_grad=False)
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=True)
def test_gradcheck_forward_ad_respects_requires_grad(self):
    # Currently requires_grad is used as a easy way for gradcheck to know
    # which inputs of the function are meant to be differentiable
    # Count jvp invocations to verify gradcheck only differentiates
    # w.r.t. inputs that require grad.  (List so the closure can mutate it.)
    jvp_count = [0]
    class UserFn(Function):
        @staticmethod
        def forward(ctx, x, y):
            return x.clone(), y.clone()
        @staticmethod
        def jvp(ctx, x_t, y_t):
            jvp_count[0] += 1
            return x_t, y_t
    x = torch.rand(2, dtype=torch.double, requires_grad=True)
    y = torch.rand(2, dtype=torch.double, requires_grad=True)
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=False)
    self.assertEqual(jvp_count[0], 2)  # (2) once per input
    jvp_count = [0]
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=False)
    self.assertEqual(jvp_count[0], 6)  # (+4): (once with normal ZT (+1), once with efficient ZT (+1)) for each input (x2)
    jvp_count = [0]
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=True)
    self.assertEqual(jvp_count[0], 12)  # (+6): (compute batch of 2 with vmap (+1), with a loop (+2)) for each input (x2)
    jvp_count = [0]
    # Repeat the previous test except we mark one input with requires_grad=False
    # NB: _test_undefined_forward_mode is only (+1), when function has single differentiable input, not (+2)!
    # Otherwise, other counts are halved.
    x = torch.rand(2, dtype=torch.double, requires_grad=True)
    y = torch.rand(2, dtype=torch.double, requires_grad=False)
    gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
              check_batched_grad=False, check_batched_forward_grad=True)
    self.assertEqual(jvp_count[0], 5)  # 1 + 1 + 3
def test_gradcheck_check_forward_or_backward_only(self):
    """Depending on settings for check_forward_ad and check_backward_ad, the
    correct codepaths should be reached (or not reached)
    """
    fwd_fail_err_msg = "FAIL FWD"
    bwd_fail_err_msg = "FAIL BWD"
    class UserFn(Function):
        @staticmethod
        def forward(ctx, foo, fwd_bad, bwd_bad):
            # Booleans select which AD direction deliberately fails.
            ctx.fwd_bad = fwd_bad
            ctx.bwd_bad = bwd_bad
            return foo * 2
        @staticmethod
        def vjp(ctx, gO):
            if ctx.bwd_bad:
                raise RuntimeError(bwd_fail_err_msg)
            else:
                return 2 * gO, None, None
        @staticmethod
        def jvp(ctx, gI, _1, _2):
            if ctx.fwd_bad:
                raise RuntimeError(fwd_fail_err_msg)
            else:
                return 2 * gI
    # Exhaustively sweep every combination of mode/check/failure flags.
    for fast_mode in (True, False):
        for check_forward_ad in (True, False):
            for check_backward_ad in (True, False):
                for fwd_bad in (True, False):
                    for bwd_bad in (True, False):
                        fwd_should_fail = fwd_bad and check_forward_ad
                        bwd_should_fail = bwd_bad and check_backward_ad
                        def run():
                            gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad), check_forward_ad=check_forward_ad,
                                      check_backward_ad=check_backward_ad, check_undefined_grad=check_backward_ad,
                                      check_batched_grad=check_backward_ad, fast_mode=fast_mode)
                        x = torch.rand(2, dtype=torch.double, requires_grad=True)
                        if not check_forward_ad and not check_backward_ad:
                            # Checking neither direction is a config error.
                            with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
                                run()
                            continue
                        if not fwd_should_fail and not bwd_should_fail:
                            run()
                        else:
                            # If both fail, backward AD failure "hides" forward AD failure
                            if fwd_should_fail:
                                fail_msg = fwd_fail_err_msg
                            if bwd_should_fail:
                                fail_msg = bwd_fail_err_msg
                            with self.assertRaisesRegex(RuntimeError, fail_msg):
                                run()
def test_gradcheck_forward_ad_batched_grad(self):
    """check_batched_forward_grad (vmap over forward AD): non-tensor inputs,
    unrelated inputs with a None tangent, and the clear error when the jvp
    uses a random op that vmap cannot handle."""
    x = torch.rand(2, dtype=torch.double, requires_grad=True)
    # multiple inputs and outputs with non-tensors inputs
    def fn1(a: torch.Tensor, b: int):
        return a.clone(), a + 1
    gradcheck(fn1, (x, 1), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
              check_undefined_grad=False, check_batched_forward_grad=True)
    # unrelated inputs: tangent for c is None
    def fn2(a: torch.Tensor, c: torch.Tensor):
        return a.clone()
    gradcheck(fn2, (x, x.clone()), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
              check_undefined_grad=False, check_batched_forward_grad=True)
    class Fn(Function):
        @staticmethod
        def forward(ctx, foo):
            return foo * 2
        @staticmethod
        def vjp(ctx, gO):
            return gO * 2
        @staticmethod
        def jvp(ctx, gI):
            # Random op inside jvp: illegal under vmap.
            torch.randn_like(gI)
            return gI * 2
    msg = "vmap: We do not yet support calling random operations inside of vmap"
    with self.assertRaisesRegex(RuntimeError, msg):
        gradcheck(Fn.apply, (x,), check_forward_ad=True, check_batched_forward_grad=True)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
def test_set_data_self_requires_grad(self):
a = torch.tensor(1.0, requires_grad=True)
b = torch.tensor(2.0)
c = torch.tensor(3, dtype=torch.int64)
a.data = b
with self.assertRaisesRegex(RuntimeError, 'must be floating point or complex dtype'):
a.data = c
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
    """Run a small autograd script in a subprocess and check (via the API
    usage log on stderr) whether engine worker threads were shut down.
    The embedded script must stay exactly as-is: it is executed verbatim."""
    code = """import torch
from torch.autograd import Function
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad):
        return grad

for shape in [(1,), ()]:
    v = torch.ones(shape, requires_grad=True)
    MyFunction.apply(v).backward()
"""
    s = TestCase.runWithPytorchAPIUsageStderr(code)
    # The autograd engine creates worker threads only when GPU devices are present.
    # So make sure that we do shutdown threads when we're testing cuda and make sure
    # that there is no thread to shutdown when we're not using cuda.
    if TEST_CUDA or torch.backends.mps.is_available():
        self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
    else:
        self.assertNotRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
    """A backward that recursively re-enters the engine ~2000 levels deep
    must not overflow the stack: the engine offloads deep reentrant calls
    to fresh worker threads."""
    class DeepReentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # Stash a fresh differentiable copy, decremented once per level,
            # so recursion terminates when it drops below zero.
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()
        @staticmethod
        def backward(ctx, x):
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                DeepReentrant.apply(ctx.x).sum().backward()
            return x
    # Test stack overflow escape mechanism
    v = torch.tensor(2000.0, requires_grad=True)
    # This will cause stack overflow if reentrant calls are handled
    # in the same thread recursively
    DeepReentrant.apply(v).sum().backward()
    # Test stack overflow escape mechanism multiple times
    # to ensure reusing workers in the pool works fine
    v2 = torch.tensor(200.0, requires_grad=True)
    DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
    """Reentrant backward tasks must be prioritized ahead of ordinary tasks
    already queued in the engine; the recorded execution order proves it."""
    order = []
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x
        @staticmethod
        def backward(ctx, x):
            order.append("MyFunction")
            return x
    class Reentrant(Function):
        @staticmethod
        def forward(ctx, x):
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()
        @staticmethod
        def backward(ctx, x):
            order.append("Reentrant")
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                Reentrant.apply(ctx.x).backward()
            return x
    # Multiplying the two outputs queues both backwards at once.
    a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
    b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
    v = a * b
    v.backward()
    # The tasks for the Reentrant and MyFunction backward() will be added
    # to the queue in the autograd engine at the same time. The backward
    # for Reentrant will be executed first, which will then add other
    # backward tasks to the queue. We want to ensure all the reentrant tasks
    # are prioritized over the MyFunction backward task regardless of their
    # sequence numbers
    self.assertEqual(len(order), 11)
    self.assertEqual(order.count("Reentrant"), 10)
    self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
    """Smoke-test torch.utils.checkpoint over many independent inputs: the
    joint backward through 2000 checkpointed segments must recompute and
    complete without error."""
    num_inp = 2000
    nz_inp = 10
    nz_out = 10
    nz_bottleneck = 1000
    # small proxy network for some complex reasoning we want to do per input
    module = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp)
    )
    feat_combined = []
    for r in range(num_inp):
        data_r = torch.empty(1, nz_inp)
        data_r.uniform_()
        data_r.requires_grad = True
        feat_r = checkpoint(module, data_r)
        feat_combined.append(feat_r)
    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
@slowTest
def test_checkpointing_without_reentrant_memory_savings(self):
    """Both reentrant and non-reentrant checkpointing must reduce peak CUDA
    memory versus no checkpointing, measured with max_memory_allocated."""
    class MyModel(nn.Module):
        def __init__(self, n, use_checkpoint, use_reentrant):
            super().__init__()
            self.n = n
            self.use_checkpoint = use_checkpoint
            self.use_reentrant = use_reentrant
            self.layers = nn.ModuleList()
            for i in range(self.n):
                layer = nn.Sequential(
                    nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256)
                )
                self.layers.append(layer)
            # pre-allocate the grad so that increased memory usage is mainly
            # due to activations.
            for layer in self.layers:
                for lin in layer:
                    lin.weight.grad = torch.ones_like(lin.weight)
                    lin.bias.grad = torch.ones_like(lin.bias)
        def forward(self, x):
            for i in range(self.n):
                if not self.use_checkpoint:
                    x = self.layers[i](x)
                else:
                    x = checkpoint(self.layers[i], x, use_reentrant=self.use_reentrant)
            return x
    model_no_checkpoint = MyModel(8, use_checkpoint=False, use_reentrant=False).cuda()
    model_reentrant_checkpoint = MyModel(8, use_checkpoint=True, use_reentrant=True).cuda()
    model_no_reentrant_checkpoint = MyModel(8, use_checkpoint=True, use_reentrant=False).cuda()
    x = torch.randn(100, 256, requires_grad=True, device='cuda')
    # Measure peak memory for each variant; stats are reset between runs.
    torch.cuda.reset_peak_memory_stats()
    loss = model_no_checkpoint(x.clone()).sum()
    loss.backward()
    mem_no_checkpoint = torch.cuda.max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    loss = model_reentrant_checkpoint(x.clone()).sum()
    loss.backward()
    mem_reentrant_checkpoint = torch.cuda.max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    loss = model_no_reentrant_checkpoint(x.clone()).sum()
    loss.backward()
    mem_no_reentrant_checkpoint = torch.cuda.max_memory_allocated()
    self.assertTrue(mem_reentrant_checkpoint < mem_no_checkpoint)
    self.assertTrue(mem_no_reentrant_checkpoint < mem_no_checkpoint)
    def test_checkpointing_without_reentrant_custom_function_raises(self):
        """
        Accessing ctx.saved_tensors multiple times in a custom function
        backward pass with non-reentrant checkpoint currently throws due to
        saved tensors not being recomputed in between the accesses.
        """
        # For verifying first access to ctx.saved_tensors succeeded.
        _first_saved_tensor_access_succeeded = False

        class MyFunc(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x, y, z):
                w = x * y * z
                out = w + w
                ctx.save_for_backward(x, y, z, w, out)
                return out

            @staticmethod
            def backward(ctx, grad_out):
                x, y, z, w, out = ctx.saved_tensors
                nonlocal _first_saved_tensor_access_succeeded
                _first_saved_tensor_access_succeeded = True
                # Raises issue in non-reentrant checkpointing where
                # second access to saved tensors raises because they were
                # not recomputed.
                x_2, y_2, z_2, w_2, out_2 = ctx.saved_tensors

        x = torch.tensor(1., requires_grad=True)
        y = torch.tensor(2., requires_grad=True)
        z = torch.tensor(3., requires_grad=True)

        def foo(x, y, z):
            x = x * y * z
            y = y * y * z
            z = z * z
            out = MyFunc.apply(x, y, z)
            return out

        out = checkpoint(foo, x, y, z, use_reentrant=False)
        with self.assertRaisesRegex(
            RuntimeError,
            "Attempt to retrieve a tensor saved by autograd multiple times"
        ):
            out.sum().backward()
        # The error must come from the second unpack, not the first.
        self.assertTrue(_first_saved_tensor_access_succeeded)
    def test_access_saved_tensor_twice_without_recomputation_raises(self):
        """
        If using saved tensor hooks based checkpointing and a saved tensor
        is accessed multiple times without triggering recomputation in the
        middle, error is raised indicating so.
        """
        def foo(a):
            # NOTE(review): b and c do not feed the returned value;
            # presumably present so the recomputed graph contains extra
            # ops — confirm against the checkpoint implementation.
            b = a * a
            c = a * b
            d = torch.exp(a)
            return d

        a = torch.randn(5, requires_grad=True)
        d = checkpoint(foo, a, use_reentrant=False)
        # First access
        d.grad_fn._saved_result
        # Second access raises error
        with self.assertRaisesRegex(
            RuntimeError,
            "Attempt to retrieve a tensor saved by autograd multiple times"
        ):
            d.grad_fn._saved_result
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
"""
Basic test for checkpoint without reentrant autograd.
"""
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
# Run model with and without checkpointing and verify gradients are
# equivalent, regardless of if inputs require grads or not.
module_copy = deepcopy(module)
feat_combined = []
feat_combined_no_checkpoint = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = input_requires_grad
data_r_copy = data_r.clone()
feat_r = checkpoint(module, data_r, use_reentrant=False)
feat_combined.append(feat_r)
feat_r_no_checkpoint = module_copy(data_r)
feat_combined_no_checkpoint.append(feat_r_no_checkpoint)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
mean_combined_no_checkpoint.backward()
for checkpoint_param, param in zip(module.parameters(), module_copy.parameters()):
self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
class NoGradModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.lin2 = nn.Linear(2, 2, bias=False)
def forward(self, x):
with torch.no_grad():
return self.lin2(self.linear(x))
module = NoGradModule()
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"none of output has requires_grad=True"
)
if use_reentrant
else contextlib.suppress()
)
a = torch.randn(2, 2, requires_grad=True)
for _ in range(3):
with err_ctx:
# out does not require grad
out = checkpoint(module, a, use_reentrant=use_reentrant)
# Make loss require grad, otherwise we would run into
# "element 0 of tensors does not require grad and does not have a grad_fn"
out += a
out.sum().backward()
def test_checkpointing_without_reentrant_correct_grad(self):
"""
Verifies that correct gradients are calculated for checkpoint
without reentrant autograd, for both backward() and autograd.grad().
"""
a = torch.randn(2, 2, requires_grad=True)
b = torch.exp(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
c = checkpoint(torch.exp, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
a.grad = None
d = checkpoint(torch.exp, a, use_reentrant=False).sum()
d_grad, = torch.autograd.grad(d, (a,))
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
    def test_checkpointing_without_reentrant_dataparallel(self):
        """
        Verifies gradient correctness when checkpoint without reentrant autograd
        is used in conjunction with DataParallel.
        """
        class LinearModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = nn.Linear(2, 2, bias=False)

            def forward(self, inp):
                return self.linear(inp)

        a = torch.randn(2, 2, requires_grad=True)
        if torch.cuda.is_available():
            a = a.cuda()

        model = LinearModule()
        if torch.cuda.is_available():
            model = model.cuda()

        # Reference grad: plain (non-checkpointed, non-DataParallel) run on a copy.
        b = deepcopy(model)(a).sum()
        b.backward()
        b_grad = a.grad

        a.grad = None

        # Same computation through DataParallel + non-reentrant checkpoint.
        module = torch.nn.DataParallel(deepcopy(model))
        c = checkpoint(module, a, use_reentrant=False).sum()
        c.backward()
        c_grad = a.grad

        self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
"""
Ensures that gradient hooks are only called once per tensor.
"""
w = torch.randn(10, 10, requires_grad=True)
count = 0
def hook(grad):
nonlocal count
count += 1
w.register_hook(hook)
x = torch.rand(10, 10, requires_grad=True)
h = w * x # Using w outside the checkpoint
out = checkpoint(lambda x: w * x, h, use_reentrant=False) # Using w inside the checkpoint
out.sum().backward()
# should only call hook once
self.assertEqual(count, 1)
    def test_checkpointing_without_reentrant_arbitrary_input_output(self):
        """
        Ensures checkpointing without reentrant autograd works with functions
        with arbitrary input/output structures.
        """
        class MyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.layer = torch.nn.Linear(5, 5, bias=False)

            def forward(self, dict_input):
                # Input and output are dicts rather than (tuples of) tensors.
                tensor = dict_input["tensor"]
                return {
                    "result": self.layer(tensor)
                }

        model_no_checkpoint = MyModel()
        model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)

        inp = {
            "tensor": torch.randn(5, 5)
        }

        out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()

        out_checkpoint = checkpoint(
            model_checkpoint_without_reentrant,
            inp,
            use_reentrant=False
        )["result"].sum()

        self.assertEqual(out_checkpoint, out_no_checkpoint)

        out_no_checkpoint.backward()
        out_checkpoint.backward()

        # Parameter gradients must agree between the two identical models.
        for param, checkpoint_param in zip(model_no_checkpoint.parameters(), model_checkpoint_without_reentrant.parameters()):
            self.assertEqual(param.grad, checkpoint_param.grad)
    def test_callback_adds_callback(self):
        """A callback queued on the autograd engine may itself queue another
        callback during backward; both must run exactly once."""
        called = [0]

        def callback_final():
            called[0] += 1

        def callback_adds_callback():
            called[0] += 1
            # Queue a second callback from within the first one.
            Variable._execution_engine.queue_callback(callback_final)

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            @once_differentiable
            def backward(ctx, grad):
                # Queue the first callback from inside a backward node.
                Variable._execution_engine.queue_callback(callback_adds_callback)
                return grad

        a = torch.rand((3, 3), requires_grad=True)
        b = MyFunc.apply(a)
        b.sum().backward()

        self.assertEqual(called[0], 2)
    def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
        """Run a reentrant backward, installing engine callbacks at the given
        reentrant depths (0 = outer backward, 1 = nested backward).

        Returns a dict with the number of times the "outer" and "inner"
        callbacks fired.
        """
        counter = {}
        counter["inner"] = 0
        counter["outer"] = 0

        def inc_inner_counter():
            counter["inner"] += 1

        def inc_outer_counter():
            counter["outer"] += 1

        class MyFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            @once_differentiable
            def backward(ctx, input):
                if 1 in install_callbacks_in_depths:
                    # Add a callback to execute.
                    Variable._execution_engine.queue_callback(inc_inner_counter)

                return input

        class MyReentrantFunc(Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            @once_differentiable
            def backward(ctx, input):
                if 0 in install_callbacks_in_depths:
                    # Add a callback to execute.
                    Variable._execution_engine.queue_callback(inc_outer_counter)
                # Reentrant backward call.
                tmp_inp = input.detach().requires_grad_()
                with torch.enable_grad():
                    tmp_out = (MyFunc.apply(tmp_inp)).sum()
                tmp_out.backward()
                return input

        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = MyReentrantFunc.apply(t1)
        t3 = t2.sum()
        torch.autograd.backward([t3])

        return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
    def test_reentrant_with_non_leaf_variable_hook(self):
        """A non-leaf tensor's hook may run a reentrant backward to rewrite
        the gradient: the hook returns 3x the incoming grad (g.grad == 2
        from the nested backward, plus the original grad), so the leaf ends
        up with 3 * 2 * param == 6 * param."""
        handle = None
        param = torch.rand(10, requires_grad=True)

        def manual_increase_gradient(grad):
            handle.remove()
            # Add some sort of gradient penalty by directly updating the gradients
            with torch.enable_grad():
                g = grad.detach().requires_grad_()
                # out = 2*g + 5, so the nested backward yields g.grad == 2.
                out = ((g * 2) + 5).sum()
                out.backward()
            res = g.grad + grad
            return res

        # Forward pass
        tmp = (param * param)
        handle = tmp.register_hook(manual_increase_gradient)
        loss = tmp.sum()
        # Compute the gradients
        loss.backward()
        self.assertEqual(param.grad, 6 * param)
    def test_grad_fn_attr_bindings(self):
        """Exercise the codegen'd Python bindings for saved attributes on
        grad_fn nodes (`_saved_*` / `_raw_saved_*`), covering each C++ type
        the codegen converts to a Python type, plus the errors raised when
        saved tensors are accessed after being freed."""
        # Check that the getter of each type returns what we want
        # See `gen_autograd_functions.py` for how the getters are generated
        #
        # This test is only meant to check if the codegen'd bindings work
        # Please help update this test if you update the names of any the fields we check!
        #
        a = torch.ones(1, requires_grad=True)
        b = torch.ones(1, requires_grad=True)
        out = torch.stack([a, b], dim=0)
        self.assertEqual(out.grad_fn._saved_tensors, (a, b))  # TensorList -> Tuple[Tensor]
        self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
        self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
        self.assertEqual(out.grad_fn._saved_dim, 0)  # int64_t -> int
        self.assertIsInstance(out.grad_fn._saved_dim, int)

        out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)

        out.sum().backward()
        # After backward, saved tensors are freed; plain attributes (dim) survive.
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._saved_tensors
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._raw_saved_tensors
        self.assertEqual(out.grad_fn._saved_dim, 0)

        a = torch.ones(2, 2, requires_grad=True)
        indices = torch.tensor([0, 1])
        out = a[:, indices]
        self.assertEqual(out.grad_fn._saved_indices, (None, indices))  # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
        self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
        self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
        self.assertEqual(out.grad_fn._saved_self_sizes, a.shape)  # IntArrayRef -> Tuple[int]
        self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)

        out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
        with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
            out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)

        a = torch.ones(2, 2, requires_grad=True)
        out = a * a
        out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
            out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)

        a = torch.ones(1, 1, 2, requires_grad=True)
        out = torch.nn.functional.interpolate(a, 4, mode="linear")
        self.assertEqual(out.grad_fn._saved_output_size, (4,))  # c10::optional<IntArrayRef> -> int[]?
        self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
        self.assertEqual(out.grad_fn._saved_align_corners, False)  # bool -> bool
        self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
        if hasattr(out.grad_fn, '_saved_scale_factors'):
            self.assertIsNone(out.grad_fn._saved_scale_factors)  # c10::optional<ArrayRef<double>> -> float[]?
        else:
            self.assertIsNone(out.grad_fn._saved_scales)  # c10::optional<ArrayRef<double>> -> float[]?

        out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
        self.assertIsNone(out.grad_fn._saved_output_size)
        self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
        self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)

        a = torch.ones(2, 2, requires_grad=True)
        out = torch.pdist(a, p=1)
        self.assertEqual(out.grad_fn._saved_p, 1.)  # double -> float
        self.assertIsInstance(out.grad_fn._saved_p, float)

        a = torch.ones(1, 1, 2, requires_grad=True)
        out = torch.logit(a, 1.)
        self.assertEqual(out.grad_fn._saved_eps, 1.)  # c10:optional<double> -> float?
        self.assertIsInstance(out.grad_fn._saved_eps, float)
        out = torch.logit(a)
        self.assertIsNone(out.grad_fn._saved_eps)

        if torch._C.has_lapack:
            a = torch.ones(1, 1, requires_grad=True)
            q, r = torch.linalg.qr(a, mode="reduced")
            self.assertEqual(q.grad_fn._saved_mode, "reduced")  # std::string -> str

        a = torch.tensor([1.], requires_grad=True)
        out = torch.div(a, 2., rounding_mode="trunc")
        self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc")  # c10::optional<std::string> -> str?
        out = torch.div(a, 2., rounding_mode=None)
        self.assertIsNone(out.grad_fn._saved_rounding_mode)  # c10::optional<std::string> -> str?

        # Scalar saved values preserve the Python type they were passed as.
        x = torch.zeros(5, requires_grad=True)
        out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
        self.assertIsInstance(out.grad_fn._saved_threshold, complex)  # Scalar(complex double) -> complex
        cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
        out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
        self.assertIsInstance(out.grad_fn._saved_threshold, complex)  # Scalar(complex float) -> complex
        out = torch.threshold(x, threshold=1., value=1.)
        self.assertIsInstance(out.grad_fn._saved_threshold, float)  # Scalar(floating point) -> float
        out = torch.threshold(x, threshold=1, value=1)
        self.assertIsInstance(out.grad_fn._saved_threshold, int)  # Scalar(integral) -> int
        out = torch.threshold(x, threshold=False, value=False)
        self.assertIsInstance(out.grad_fn._saved_threshold, bool)  # Scalar(bool) -> bool

        a = torch.ones(2, 2, requires_grad=True)
        out = a.as_strided((3,), (1,), 1)
        self.assertEqual(out.grad_fn._saved_storage_offset, 1)  # c10:optional<int64_t> -> int?
        self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
        out = a.as_strided((3,), (1,))
        self.assertIsNone(out.grad_fn._saved_storage_offset)

        a = torch.ones(2, requires_grad=True)
        out = torch.tanh(a)
        self.assertEqual(out, out.grad_fn._saved_result)  # saved variable when output

        a = torch.randn(3, 5, requires_grad=True)
        b = torch.tensor([1, 0, 4])
        loss = nn.NLLLoss()
        out = loss(a, b)
        self.assertIsNone(out.grad_fn._saved_weight)
        loss = nn.NLLLoss(weight=torch.ones((5,)))
        out = loss(a, b)
        self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,)))  # c10:optional<Tensor> -> Tensor?

        out.sum().backward()
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
    def test_custom_function_saved_tensors(self):
        """Check `saved_tensors`/`_raw_saved_tensors` on a custom Function:
        None entries, register_hooks argument/usage rules, and the errors
        raised after the saved variables are released by backward."""
        def getFn(save=True):
            class MyFn(Function):
                @staticmethod
                def forward(ctx, x):
                    if save:
                        ctx.save_for_backward(x, None)
                    return x

                @staticmethod
                def backward(ctx, g):
                    return g

            return MyFn

        a = torch.randn(5, requires_grad=True)

        y = getFn(True).apply(a)
        self.assertEqual((a, None), y.grad_fn.saved_tensors)
        saved = y.grad_fn._raw_saved_tensors
        self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
        # We can't tell the underlying tensor is None without unpacking it
        self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
        # We catch that error when the user calls register_hooks on it
        with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
            saved[1].register_hooks(lambda x: x, lambda x: x)

        # register_hooks requires exactly two callables (pack, unpack).
        with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
            saved[0].register_hooks(lambda x: x)
        with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
            saved[0].register_hooks(1, 1)
        saved[0].register_hooks(lambda x: x, lambda x: x)
        # Hooks can only be registered once per SavedTensor.
        with self.assertRaisesRegex(RuntimeError, "already been set"):
            saved[0].register_hooks(lambda x: x, lambda x: x)
        y.sum().backward()

        # Using a reference to the SavedTensor object after the
        # saved variables have been released can lead to undefined behavior
        del saved
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            y.grad_fn._raw_saved_tensors
        with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
            y.grad_fn.saved_tensors

        # When nothing was saved, both accessors return empty tuples.
        y = getFn(False).apply(a)
        self.assertEqual(y.grad_fn.saved_tensors, ())
        self.assertEqual(y.grad_fn._raw_saved_tensors, ())
    def test_autograd_views_codegen(self):
        """Pin the current behavior of codegen'd view ops (view_as, unbind)
        w.r.t. view tracking and inplace modification of their outputs."""
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.

        # This test checks the behavior of two codegen functions (view_as and unbind)
        # with respect to view tracking and inplace operation on the output.
        def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
            def maybe_check_raise(fn, should_raise):
                self.assertTrue(should_raise is None or isinstance(should_raise, str))
                if should_raise is not None:
                    with self.assertRaisesRegex(RuntimeError, should_raise):
                        fn()
                else:
                    fn()

            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.view_as(inp)
            # Are they differentiable views?
            self.assertTrue(out._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])

            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.unbind()
            # Are they differentiable views?
            self.assertTrue(out[0]._is_view() == is_view)
            self.assertTrue(out[1]._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
            maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])

        # should_raise contains None if it should not raise
        # should_raise contains a string of the error if it should raise
        # The 3 elements are for view_as, first output of unbind and second output of unbind
        run_test(grad_mode=True, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
        inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
        run_test(grad_mode=True, requires_grad=True, is_view=True,
                 should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        run_test(grad_mode=False, requires_grad=True, is_view=True,
                 should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
        run_test(grad_mode=False, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
    def test_inplace_not_requires_grad(self):
        """Inplace writes of a grad-requiring tensor into views created by a
        custom Function, or by multi-output view ops, must raise even when
        the base does not require grad; ordinary views still accept them."""
        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.view_as(inp)

            @staticmethod
            def backward(ctx, grad):
                return grad

        # Original Tensor does not require grad
        a = torch.rand(1, 2)

        # Tensor being written does require grad
        b = torch.rand(1, requires_grad=True)

        # Take an invalid view on 'a' that should raise an error (warns during deprecation)
        view_a = MyFn.apply(a)

        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a += b

        # Extra test for copy_ that is a manual implementation and could be easily
        # forgotten when the codegen is updated (warns during deprecation)
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = MyFn.apply(a)

        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a.copy_(b)

        # Functions that should throw must properly throw
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = a.unbind()[0]
        with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
                                                  "multiple views."):
            view_a.copy_(b)

        # Sanity check that views that should work still work
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        a.select(1, 0).copy_(b)
    def _do_test_autograd_simple_views_python(self, dtype):
        """Pin the current custom-Function behavior when an output is the
        input, a view of the input, or a view of a temporary, combined with
        an optional inplace op on that output (for the given dtype)."""
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.

        # This checks the autograd.Function behavior when we return one or multiple outputs
        # while one of these is an input, a view of an input or of a temporary tensor.

        # This indicator is used to track how many times the backward function was called
        bw_called = [0]
        # This indicator is used to check if the argument `ga` contains non-zero values
        ga_nz = [False]

        class IdOneOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a

            @staticmethod
            def backward(ctx, ga):
                bw_called[0] += 1
                return ga, None, None

        class IdTwoOutput(Function):
            @staticmethod
            def forward(ctx, a, b, make_view):
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                return a, a + b

            @staticmethod
            def backward(ctx, ga, gab):
                bw_called[0] += 1
                if ga.eq(0).all():
                    ga_nz[0] = False
                else:
                    ga_nz[0] = True
                return ga + gab, gab, None

        class ViewOfTemp(Function):
            @staticmethod
            def forward(ctx, a, make_view):
                ctx.save_for_backward(a)
                if make_view:
                    a = a.narrow(0, 0, 2)
                else:
                    a = a.clone()
                b = a.clone()
                return b.select(0, 0)

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, 0).copy_(grad)
                return res, None

        fn_id_to_inplace_on_view_err_msg = {
            "one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
                           "modified inplace. This view was created inside a custom Function"),
            "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
                           " This view is the output of a function that returns multiple views."),
            "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
                             "modified inplace. This view was created inside a custom Function")
        }

        for fn_id in ["one_output", "two_output", "view_of_temp"]:
            for inplace in [True, False]:
                for make_view in [True, False]:
                    # Used for special casing the tests below
                    output_is_a_view = (make_view or fn_id == "view_of_temp")

                    def fn(a, b):
                        # never modify a, b inplace for gracheck
                        a = a.clone()
                        b = b.clone()
                        if fn_id == "two_output":
                            tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                            if inplace:
                                tmp1 += 3
                                tmp2 += 3
                            else:
                                tmp1 = tmp1 + 3
                                tmp2 = tmp2 + 3
                            tmp = tmp1 * tmp2
                        else:
                            if fn_id == "one_output":
                                tmp = IdOneOutput.apply(a, b, make_view)
                            else:
                                tmp = ViewOfTemp.apply(a + b, make_view)
                            if inplace:
                                tmp += 3
                            else:
                                tmp = tmp + 3

                        return tmp.sum()

                    a = torch.ones(2, dtype=dtype, requires_grad=True)
                    b = torch.ones(2, dtype=dtype, requires_grad=True)

                    err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]

                    if not inplace or not output_is_a_view:
                        gradcheck(fn, (a, b), check_batched_grad=False)

                    # Was the custom backward called properly
                    bw_called[0] = 0
                    ga_nz[0] = True  # For the case where the backward is called

                    # Inplace on a view output must raise; otherwise backward runs.
                    if inplace and output_is_a_view:
                        with self.assertRaisesRegex(RuntimeError, err_msg):
                            fn(a, b)
                    else:
                        fn(a, b).backward()

                    expected_called = 1
                    expected_ga_nz = True

                    if output_is_a_view and inplace:
                        expected_called = 0

                    self.assertTrue(bw_called[0] == expected_called)
                    self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
    def test_autograd_inplace_views_creation_meta(self):
        """Check that a view's creation_meta (normal / multi-output /
        custom-Function / no_grad) is correctly preserved and enforced when
        an inplace-view op is then applied to that view."""
        # Tests creation_meta properly handled for inplace views

        class Func(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.view_as(x)

            @staticmethod
            def backward(ctx, x):
                return x
        view_custom = Func.apply

        def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
            # This test checks the behavior of inplace-view functions when
            # the views are created in grad mode or not
            base = torch.rand(2, 3, requires_grad=requires_grad).clone()
            # 1. Create a view with `grad_mode=grad_mode_view`
            with torch.set_grad_enabled(grad_mode_view):
                if fn_type == "multi_view":
                    inp = base.unbind()[0]
                elif fn_type == "custom" :
                    inp = view_custom(base)
                else:
                    inp = base.view_as(base)

            # 2. Perform inplace view with `grad_mode=grad_mode_iview`
            with torch.set_grad_enabled(grad_mode_iview):
                if error1 is not None:
                    with self.assertRaisesRegex(RuntimeError, error1):
                        fn(inp)
                    return
                else:
                    # If error is None, check that runs without error
                    fn(inp)
            # 3. Do inplace on the (new) view
            if error2 is not None:
                with self.assertRaisesRegex(RuntimeError, error2):
                    inp.add_(1)
            else:
                # If error is None, check that runs without error
                inp.add_(1)

        no_grad_err = "A view was created in no_grad mode"
        multi_view_err = "function that returns multiple views"
        custom_err = "view was created inside a custom Function"

        def run_tests(fn):
            # Exhaustively sweep view kind x grad modes x requires_grad and
            # derive which step (if any) is expected to fail.
            for fn_type in ("normal", "multi_view", "custom"):
                for grad_mode_view in (True, False):
                    for grad_mode_iview in (True, False):
                        for requires_grad in (True, False):
                            error1 = None  # expected error when we do inplace_view on original view
                            error2 = None  # expected error when we do inplace on the resulting view

                            if requires_grad:
                                if not grad_mode_view and grad_mode_iview:
                                    error1 = no_grad_err
                                if not grad_mode_view and not grad_mode_iview:
                                    error2 = no_grad_err

                                if fn_type == "multi_view":
                                    if grad_mode_view and grad_mode_iview:
                                        error1 = multi_view_err
                                    if grad_mode_view and not grad_mode_iview:
                                        error2 = multi_view_err

                                if fn_type == "custom":
                                    if grad_mode_view and grad_mode_iview:
                                        error1 = custom_err
                                    if grad_mode_view and not grad_mode_iview:
                                        error2 = custom_err

                            run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)

        # This list was created by logging gen_inplace_or_view_type.py
        # detach_ is excluded for this test because it cannot be applied to
        # views and thus does not return a view
        run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
        run_tests(lambda v: v.transpose_(0, 0))
        run_tests(lambda v: v.t_())
        run_tests(lambda v: v.squeeze_(0))
        run_tests(lambda v: v.unsqueeze_(0))
        run_tests(lambda v: v.swapdims_(0, 0))
        run_tests(lambda v: v.swapaxes_(0, 0))
    # TODO This is not the correct behavior -
    # See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
    def test_autograd_inplace_views_cross_dtype(self):
        """Pin the current (known-buggy) gradient layout when an inplace
        transpose is applied to a view_as_real view of a complex tensor:
        the inplace path yields the transposed gradient."""
        # This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
        a = a_orig.clone()
        b = torch.view_as_real(a)
        b = b.transpose(0, 1)
        b += 1
        b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
        non_inplace_grad = a_orig.grad

        a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
        a = a_orig.clone()
        b = torch.view_as_real(a)
        b.transpose_(0, 1)
        b += 1
        b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
        inplace_grad = a_orig.grad

        # TODO: this is a bug!
        # once this is fixed, it should have the transpose removed:
        # self.assertEqual(non_inplace_grad, inplace_grad)
        self.assertEqual(non_inplace_grad.T, inplace_grad)
    def test_autograd_multiple_views_python(self):
        """Pin current behavior when a custom Function's forward creates
        multiple views of an input and returns one of them."""
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these test will be BC-breaking and should be done carefully.

        # This checks that multiples views in the forward are properly traced and how they
        # behave with respect to inplace operations.

        # This indicator is used to track how many times the backward function was called
        bw_called = [0]

        class ComplexView(Function):
            @staticmethod
            def forward(ctx, a, idx):
                # NOTE: `res` is immediately overwritten — the forward
                # deliberately creates more than one view of `a`, which is
                # exactly what this test exercises.
                res = a.narrow(0, idx, 1)
                res = a.select(0, idx)
                ctx.save_for_backward(a)
                ctx.idx = idx
                return res

            @staticmethod
            def backward(ctx, grad):
                bw_called[0] += 1
                a, = ctx.saved_tensors
                res = torch.zeros_like(a)
                res.select(0, ctx.idx).copy_(grad)
                return res, None

        a = torch.ones(2, requires_grad=True)
        idx = 1

        bw_called[0] = 0
        out = ComplexView.apply(a.clone(), idx)
        out.sum().backward()
        self.assertTrue(bw_called[0] == 1)

        out = ComplexView.apply(a.clone(), idx)
        with self.assertRaisesRegex(RuntimeError,
                                    "Output 0 of ComplexViewBackward is a view and is being modified inplace"):
            out += 1
def test_autograd_python_custom_function_inplace(self):
    """Pin down the current behavior of custom autograd.Functions whose
    forward modifies inputs in place (with ctx.mark_dirty).

    NOTE: this captures the *current* behavior, not necessarily the ideal
    one; any change is BC-breaking and must be made deliberately.
    """
    # Counts backward invocations so each scenario can assert exactly one.
    bw_called = [0]

    # I) Single output
    class MyAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    # No extra inplace
    c = MyAdder.apply(a.clone(), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c = MyAdder.apply(a.clone(), b)
    c += 2
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    bw_called[0] = 0
    c = MyAdder.apply(a.clone().view_as(a), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # Should not give non-inputs to mark_dirty
    class MyAdderBad(Function):
        @staticmethod
        def forward(ctx, a, b):
            # c is freshly allocated inside forward, so marking it dirty
            # is incorrect usage of mark_dirty.
            c = 3 * a
            c.add_(b)
            ctx.mark_dirty(c)
            return c

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            grad = 3 * grad
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    # Marking a non-input dirty currently only produces a single warning.
    with warnings.catch_warnings(record=True) as w:
        MyAdderBad.apply(a.clone(), b)
    self.assertEqual(len(w), 1)

    # II) Multiple outputs
    class MyBadAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + gab

    # No extra inplace
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    c += 2
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
    with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
        c, d = MyBadAdder.apply(a.clone().view_as(a), b)

    # III) Inplace + other op
    class MyOutPlaceAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            # The dirtied input is not returned as-is, only a clone of it.
            return a.clone(), a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + 2 * gab

    # We don't reuse the input
    def fn(a, b):
        orig_a = a.clone().view_as(a)
        c, d = MyOutPlaceAdder.apply(orig_a, b)
        return (c * d).sum()

    bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
    with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
        fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
    """A custom Function returning a view under no_grad should behave like
    the equivalent built-in view op would."""
    class Alias(Function):
        @staticmethod
        def forward(ctx, x):
            # Full-slice view of the input.
            return x[:]

        @staticmethod
        def backward(ctx, gx):
            return gx

    inp = torch.rand(2, requires_grad=True)

    with torch.no_grad():
        output = Alias.apply(inp)

    with torch.no_grad():
        expected_output = inp[:]

    # Calling the custom function should operate as if we called an equivalent op
    self.assertEqual(output.requires_grad, expected_output.requires_grad)

    # Check that in-place modification on view throws
    leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
    with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
        output.zero_()
def test_grad_mode_restored_reentrant(self):
    """Grad mode toggled inside a reentrant backward must be restored to
    whatever it was when that backward started."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, go):
            # Record grad mode on entry, force it on for a nested backward,
            # then check it is restored after leaving the with block.
            original = torch._C.is_grad_enabled()
            with torch.enable_grad():
                self.assertTrue(torch._C.is_grad_enabled())
                foo = torch.rand(go.size(), requires_grad=True)
                grad, = torch.autograd.grad(
                    foo ** 3, foo, grad_outputs=go
                )
                self.assertTrue(torch._C.is_grad_enabled())
            self.assertTrue(torch._C.is_grad_enabled() == original)
            return grad

    inp = torch.rand(3, requires_grad=True)

    # Case where original==False
    MyFunction.apply(inp).sum().backward()

    # Case where original==True
    MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_custom_function_error(self):
    """Custom Functions missing forward/backward/jvp, or defining both
    backward and vjp, must raise informative errors."""
    class BadFw(Function):
        # No forward defined at all.
        @staticmethod
        def backward(ctx, foo):
            return foo

    class BadBw(Function):
        # No backward/vjp defined.
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    class BadBw2(Function):
        # Defines both backward and vjp, which is ambiguous.
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

        @staticmethod
        def backward(ctx, foo):
            return foo

        @staticmethod
        def vjp(ctx, foo):
            return foo

    class BadJvp(Function):
        # No jvp for forward-mode AD.
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    inp = torch.rand(1, requires_grad=True)
    with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
        BadFw.apply(inp)

    with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
        BadBw.apply(inp).sum().backward()

    with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
        BadBw2.apply(inp).sum().backward()

    with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
        with fwAD.dual_level():
            d = fwAD.make_dual(inp, torch.rand_like(inp))
            res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
    """Forward-mode AD requires jvp to return a view consistent with the
    view relationship the forward established; exercise each failure mode."""
    # Maps a behavior flag to the expected error message (None == success).
    flag_to_error = {
        "ok": None,
        "not_a_view": "jvp is not returning a view",
        "not_a_view_of_inp": "jvp is not returning a view of the given",
        "not_a_view_of_inp_base": "jvp is not returning a view of the same base",
    }

    class ViewFn(Function):
        @staticmethod
        def forward(ctx, foo, flag):
            ctx.flag = flag
            ctx.size = foo.size()
            return foo.narrow(0, 0, 2)

        @staticmethod
        def vjp(ctx, gO):
            gI = gO.new_zeros(ctx.size)
            gI.narrow(0, 0, 2).copy_(gO)
            return gI, None

        @staticmethod
        def jvp(ctx, gI, _):
            res = gI.narrow(0, 0, 2)
            if ctx.flag != "ok":
                # Break the view in the gradients!
                res = res.clone()
            if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
                # Result should be a view, just of the wrong thing
                res = res.view_as(res)
            return res

    inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

    for flag, msg in flag_to_error.items():
        def test_fn(inp):
            if flag == "not_a_view_of_inp_base":
                # Make the tested input itself a view so the base differs.
                inp = inp.view_as(inp)
            return ViewFn.apply(inp, flag)

        if msg is None:
            gradcheck(test_fn, inp, check_forward_ad=True)
        else:
            with self.assertRaisesRegex(RuntimeError, msg):
                gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
    """A custom Function that marks its input dirty must also update the
    forward-mode gradient in place inside jvp."""
    class InplaceFn(Function):
        @staticmethod
        def forward(ctx, foo, flag):
            ctx.mark_dirty(foo)
            ctx.flag = flag
            foo.mul_(2)
            return foo

        @staticmethod
        def vjp(ctx, gO):
            return 2 * gO, None

        @staticmethod
        def jvp(ctx, gI, _):
            if ctx.flag:
                # Don't do the change inplace
                return 2 * gI
            else:
                gI.mul_(2)
                return gI

    inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

    def test_fn(inp, flag):
        # Clone so the Function's in-place write stays local to the check.
        inp = inp.clone()
        return InplaceFn.apply(inp, flag)

    gradcheck(test_fn, (inp, False), check_forward_ad=True)

    with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
        gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
    """gradcheck's forward-mode check must catch a jvp whose formula
    disagrees with the numerical Jacobian."""
    class UserFn(Function):
        @staticmethod
        def forward(ctx, foo, should_fail):
            ctx.should_fail = should_fail
            return foo * 2

        @staticmethod
        def vjp(ctx, gO):
            return 2 * gO, None

        @staticmethod
        def jvp(ctx, gI, _):
            if ctx.should_fail:
                # Wrong gradient formula
                return 3 * gI
            else:
                return 2 * gI

    inp = torch.rand(10, dtype=torch.double, requires_grad=True)
    gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)

    with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
        gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_non_tensor_before_tensor_args(self):
    """jvp must receive None in the positions of forward's non-tensor
    arguments, with tangents only for the tensor arguments."""
    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, nt, x, nt2, y):
            return x * 2 + y * 3

        @staticmethod
        def jvp(ctx, nt, x_t, nt2, y_t):
            # Non-tensor argument slots arrive as None.
            self.assertIsNone(nt)
            self.assertIsNone(nt2)
            return x_t * 2 + y_t * 3

    x = torch.tensor(1., dtype=torch.double)
    t = torch.tensor(1., dtype=torch.double)
    y = torch.tensor(1., dtype=torch.double)

    with fwAD.dual_level():
        dual_x = fwAD.make_dual(x, t)
        MyFn.apply(1, dual_x, 1, y)

    gradcheck(MyFn.apply, (1, x.requires_grad_(True), 1, y.requires_grad_(True)), check_forward_ad=True,
              check_backward_ad=False, check_batched_grad=False)
def test_custom_function_forward_mode_forward_is_no_op(self):
    """When forward returns an input as-is it is treated as self.view_as(self),
    so jvp must return the matching tangent as a view too: x.view_as(x) is
    accepted, while returning the tangent itself or a fresh tensor errors."""
    error_regex = "A custom Function's forward is returning a view \\(or an input as-is\\)"

    # The different ways jvp can hand back the second (passthrough) tangent.
    return_lambdas = {
        # If we return an input as-is in forward, that is treated
        # as if self.view_as(self) is performed. If jvp returns x.view_as(x),
        # this is OK.
        "view_as": lambda x: x.view_as(x),
        # Expect this to raise an error
        "self": lambda x: x,
        # Expect this to raise the same error
        "mul_by_2": lambda x: x * 2,
    }

    for k, fn in return_lambdas.items():
        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x, y):
                return x + y, x

            @staticmethod
            def vjp(ctx, gO1, gO2):
                return gO1 + gO2, gO1

            @staticmethod
            def jvp(ctx, x_t, y_t):
                return x_t + y_t, fn(x_t)

        a = torch.tensor(1., dtype=torch.double, requires_grad=True)
        t = torch.tensor(1., dtype=torch.double)
        b = torch.tensor(1., dtype=torch.double, requires_grad=True)

        c = torch.tensor(1., dtype=torch.double)
        t2 = torch.tensor(1., dtype=torch.double)
        d = torch.tensor(1., dtype=torch.double)

        with fwAD.dual_level():
            a_dual = fwAD.make_dual(a, t)
            c_dual = fwAD.make_dual(c, t2)

            if k == "view_as":
                # The passthrough output's tangent must be a view whose
                # base is the original tangent.
                _, out2 = MyFn.apply(a_dual, b)
                self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t)

                _, out2 = MyFn.apply(c_dual, d)
                self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t2)
            else:
                with self.assertRaisesRegex(RuntimeError, error_regex):
                    MyFn.apply(a_dual, b)

                with self.assertRaisesRegex(RuntimeError, error_regex):
                    MyFn.apply(c_dual, d)

        if k == "view_as":
            gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
        else:
            with self.assertRaisesRegex(RuntimeError, error_regex):
                gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
def test_custom_function_save_for_forward(self):
    """save_for_forward exposes tensors to jvp while save_for_backward
    exposes them to vjp; each direction sees only its own set."""
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
            ctx.save_for_backward(x, y)
            ctx.save_for_forward(x, y)
            ctx.z = z
            ctx.prod = x * y
            return z * ctx.prod

        @staticmethod
        def jvp(ctx, x_t, y_t, _):
            x_p, y_p = ctx.saved_tensors
            z = ctx.z
            return z * (y_p * x_t + x_p * y_t)

        @staticmethod
        def vjp(ctx, grad_out):
            x, y = ctx.saved_tensors
            z = ctx.z
            return z * grad_out * y, z * grad_out * x, None

    a = torch.tensor(1., requires_grad=True, dtype=torch.double)
    t = torch.tensor(1., dtype=torch.double)
    b = torch.tensor(2., requires_grad=True, dtype=torch.double)
    c = 4

    with fwAD.dual_level():
        a_dual = fwAD.make_dual(a, t)
        out = Func.apply(a_dual, b, c)
        out.backward()

    gradcheck(Func.apply, (a, b, c), check_forward_ad=True)

    # When saved for backward, but not saved for forward
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x: torch.Tensor):
            ctx.save_for_backward(x)
            return x.clone()

        @staticmethod
        def jvp(ctx, x_t):
            # Nothing was saved for forward, so jvp sees no saved tensors.
            self.assertEqual(len(ctx.saved_tensors), 0)
            return x_t

        @staticmethod
        def vjp(ctx, grad_out):
            x, = ctx.saved_tensors
            self.assertEqual(len(ctx.saved_tensors), 1)
            return grad_out

    with fwAD.dual_level():
        a_dual = fwAD.make_dual(a, t)
        out = Func.apply(a_dual)
        out.backward()

    gradcheck(Func.apply, (a,), check_forward_ad=True)
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
    """Ops with integer outputs (argmax/argmin/argsort/searchsorted/
    bucketize/count_nonzero/unique family) must not require grad."""
    inp = torch.rand(4, requires_grad=True)

    out = inp.argmax()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argmin()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argsort()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.rand((), requires_grad=True)

    out = torch.searchsorted(inp, val)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
    vals = torch.rand(5, 5, requires_grad=True)
    out = torch.bucketize(vals, bins)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.empty(5).requires_grad_()
    out = val.count_nonzero()
    self.assertFalse(out.requires_grad)

    def assert_only_first_requires_grad(res):
        # Only the values output of the unique family is differentiable;
        # inverse indices and counts must not require grad.
        if not isinstance(res, tuple):
            res = (res,)
        self.assertTrue(res[0].requires_grad)
        for out in res[1:]:
            if out is not None:
                self.assertFalse(out.requires_grad)

    for sort in [True, False]:
        for return_inverse in [True, False]:
            for return_counts in [True, False]:
                res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                   return_counts=return_counts)
                assert_only_first_requires_grad(res)

                res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                   return_counts=return_counts, dim=0)
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                               return_counts=return_counts)
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                               return_counts=return_counts, dim=0)
                assert_only_first_requires_grad(res)

                # Here we test the internal functions to make sure all of them are
                # covered on top of the public API
                res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
                assert_only_first_requires_grad(res)

                # This looks public but is actually manually deleted from the
                # torch namespace in torch/functional.py
                res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
                                           return_counts=return_counts)
                assert_only_first_requires_grad(res)

                # We don't test `unique_dim_consecutive` here.
                # It looks public but the python binding is actually manually disabled in
                # tools/autograd/gen_python_functions.py

                res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
                                     return_counts=return_counts)
                assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
    """A reference cycle through ctx (output stored in ctx.meta's dict)
    must be collectable by the gc, with or without running backward."""
    class MyFn(Function):
        @staticmethod
        def forward(ctx, x, metadata):
            x = x.clone()
            ctx.meta = metadata
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, gO):
            x, = ctx.saved_tensors
            self.assertEqual(x, 3.14)
            self.assertEqual(ctx.meta["foo"], 3.14)
            return gO * x, None

    def get_refs(with_backward):
        a = torch.tensor(3.14, requires_grad=True)

        metadata = {}
        out = MyFn.apply(a, metadata)

        # Close the cycle: metadata["foo"] -> out -> grad_fn -> ctx.meta.
        metadata["foo"] = out

        if with_backward:
            out.sum().backward()
            self.assertEqual(a.grad, a)

        return torch._C._WeakTensorRef(out)

    # With gc disabled the cycle keeps the output alive; a collection
    # afterwards must reclaim it.
    with disable_gc():
        ref = get_refs(False)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())

    # The backward clears the saved_variables but not the __dict__
    with disable_gc():
        ref = get_refs(True)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
    """Saved variables should alias the original tensor when safe (leaves
    and non-output intermediates) and be rewrapped for outputs."""
    a = torch.randn(5, requires_grad=True)
    a_orig = a.detach().clone()
    b = a * a
    c = a * b
    d = torch.exp(a)

    # a is leaf
    self.assertIs(b.grad_fn._saved_self, a)
    self.assertIs(b.grad_fn._saved_other, a)
    self.assertIs(c.grad_fn._saved_self, a)

    # b is not an output
    self.assertIs(c.grad_fn._saved_other, b)

    # d is an output
    self.assertEqual(d.grad_fn._saved_result, d)
    self.assertIsNot(d.grad_fn._saved_result, d)

    c.sum().backward()

    # Accessing saved tensors after the graph was freed must raise.
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        c.grad_fn._saved_self

    # a is left untouched
    self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
    """An unpacked saved variable shares the version counter of the
    original tensor, so in-place bumps are visible through both."""
    a = torch.rand(2, requires_grad=True)

    b = torch.exp(a)

    b_unpacked = b.grad_fn._saved_result
    self.assertEqual(b, b_unpacked)
    self.assertEqual(b._version, b_unpacked._version)

    with torch.no_grad():
        b += 1

    # The version bump from the in-place add is shared.
    self.assertEqual(b, b_unpacked)
    self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
    # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.

    def test(get_input, is_leaf):
        # Balanced hooks (pack doubles, unpack halves): values round-trip.
        a = get_input()
        grad_fn = a.grad_fn
        y = a * a
        y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
        self.assertEqual(a, y.grad_fn._saved_self)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            y.sum().backward()
        else:
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)

        # Unbalanced hooks: _saved_self unpacks to 2*a, so the grad of a*a
        # becomes a (other) + 2a (self) = 3a.
        a = get_input()
        grad_fn = a.grad_fn
        y = a * a
        y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
        self.assertEqual(2 * a, y.grad_fn._saved_self)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            y.sum().backward()
        else:
            y.sum().backward()
            self.assertEqual(3 * a, a.grad)

        # double backward
        a = get_input()
        grad_fn = a.grad_fn
        y = a ** 3
        y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            g.sum().backward()
        else:
            g.sum().backward()
            self.assertEqual(6 * a, a.grad)

        # The unpack hook must return a Tensor.
        a = get_input()
        y = a * a
        y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
        with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
            print(y.grad_fn._saved_self)

        # Hooks must be unary callables.
        a = get_input()
        y = a * a
        with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
            y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)

        a = get_input()
        y = a * a
        with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
            y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)

        def inplace_double(x):
            x *= 2
            return x

        # Pack hooks may not mutate their input in place.
        a = get_input()
        t = a * a
        with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
            t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)

    # leaf
    test(lambda: torch.randn(5, requires_grad=True), True)

    # not leaf, not output
    test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
    # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.

    # exp saves its *output* rather than the original input, exercising the
    # did_not_save_original path.
    a = torch.randn(5, requires_grad=True)
    y = torch.exp(a)
    y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
    self.assertEqual(y, y.grad_fn._saved_result)
    self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
    y.sum().backward()
    self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
    # Tests that default hooks are properly registered, used and reset
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.
    # See also:
    # - test_saved_variable_packing_unpacking_saved_original_with_hooks

    def pack(x):
        warnings.warn("pack")
        return x

    with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
        a = torch.ones(5, requires_grad=True)

        warnings.simplefilter('always')
        with warnings.catch_warnings(record=True) as w:
            y = a * a
            # should raise two warnings from a being saved twice
            self.assertEqual(len(w), 2)

    # Identity hooks: values and gradients unchanged.
    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    # Balanced non-trivial hooks: saved values round-trip exactly.
    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    # Unbalanced hooks: both saved operands unpack to 2a, doubling the grad.
    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(2 * a, y.grad_fn._saved_self)
        self.assertEqual(2 * a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(4 * a, a.grad)

    # Exited hooks correctly
    a = torch.randn(5, requires_grad=True)
    y = a * a
    self.assertEqual(a, y.grad_fn._saved_self)
    self.assertEqual(a, y.grad_fn._saved_other)
    y.sum().backward()
    self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
    # See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks

    # exp saves its output, exercising the did_not_save_original path with
    # default (context-manager) hooks.
    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = torch.exp(a)
        self.assertEqual(y, y.grad_fn._saved_result)
        y.sum().backward()
        self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_not_fail(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_setting_default_saved_variable_hooks_twice_should_use_inner(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: 3 * x, lambda x: 3 * x):
b = torch.randn(5, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 5 * x, lambda x: 5 * x):
a = torch.randn(5, requires_grad=True)
y = a * a
z = b * b
y.sum().backward()
z.sum().backward()
self.assertEqual(2 * 5 * 5 * a, a.grad)
self.assertEqual(2 * 3 * 3 * b, b.grad)
def test_save_on_cpu_and_checkpoint(self):
    """save_on_cpu and activation checkpointing must compose in either
    nesting order and still reproduce the baseline gradients."""
    a = torch.randn(2, 2, requires_grad=True)

    # Baseline gradient without any hooks or checkpointing.
    b = a.pow(2).pow(2).pow(2).pow(2)
    b.sum().backward()
    b_grad = a.grad.clone()
    a.grad.zero_()

    # checkpoint nested inside save_on_cpu.
    with torch.autograd.graph.save_on_cpu():
        h = a.pow(2)
        h = checkpoint(lambda x: x.pow(2).pow(2), h, use_reentrant=False)
        c = h.pow(2)
    c.sum().backward()
    c_grad = a.grad.clone()
    a.grad.zero_()

    # save_on_cpu nested inside checkpoint.
    def f(a):
        h = a.pow(2)
        with torch.autograd.graph.save_on_cpu():
            h = h.pow(2).pow(2)
        return h.pow(2)

    d = checkpoint(f, a, use_reentrant=False)
    d.sum().backward()
    d_grad = a.grad.clone()

    self.assertEqual(b_grad, c_grad)
    self.assertEqual(b_grad, d_grad)
def test_pack_hook_with_inplace_modification_should_fail(self):
    """Pack hooks that mutate their input in place must be rejected, both
    as default hooks and when registered on a raw SavedVariable."""
    a = torch.randn(5, requires_grad=True)

    def inc(x):
        x += 1
        return x

    with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
        with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
            y = torch.exp(a)

    y = torch.exp(a)
    with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
        y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_default_saved_variable_hooks_double_backward(self):
    """Interaction of default saved-tensor hooks with double backward: the
    final grad scaling depends on which saves happen inside the hook scope."""
    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        self.assertEqual(6 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        # factor 2 because only a is saved once
        self.assertEqual(6 * 2 * a, a.grad)

    a = torch.randn(5, requires_grad=True)
    y = a ** 3
    s = torch.sum(y)
    # Hooks active only during the first backward: its saves get scaled.
    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        # factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
        # so grad is saved and self (i.e. a) is saved
        self.assertEqual(6 * 4 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        # combining the two above blocks: 2 * 4 = 8
        # note that in that sense, a is saved twice
        self.assertEqual(6 * 8 * a, a.grad)
def test_graph_save_on_cpu(self):
    """save_on_cpu must round-trip saved tensors (values, dtype, layout)
    for dense and sparse inputs, with and without pinned memory."""
    def test(get_input, cuda, pin_memory):
        with torch.autograd.graph.save_on_cpu(pin_memory):
            a = get_input()
            if cuda:
                # NOTE(review): the result of a.cuda() is discarded, so `a`
                # itself stays on its original device — confirm intended.
                a.cuda()
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
            self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
            if y.is_sparse:
                y = y.to_dense()
            y.sum().backward()

            actual = 2 * a
            expected = a.grad
            if a.is_sparse:
                # Coalesce before comparing sparse tensors.
                actual = actual.coalesce()
                expected = expected.coalesce()

            self.assertEqual(actual, expected)

    for cuda in [False] + ([True] if torch.cuda.is_available() else []):
        for pin_memory in [True, False]:
            # FloatTensor
            test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
            # DoubleTensor
            test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
            # Sparse tensor
            x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
            test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
    """With save_on_cpu, CUDA memory held by the autograd graph should
    match the no-grad baseline (activations are offloaded to CPU)."""
    def f(x):
        a = x + 1
        return a * a

    # with grad
    a = torch.ones(1, requires_grad=True, device="cuda")
    y = f(a)
    memory_with_grad = torch.cuda.memory_allocated()

    del a
    del y

    # without grad
    a = torch.ones(1, requires_grad=True, device="cuda")
    with torch.no_grad():
        y = f(a)
    memory_without_grad = torch.cuda.memory_allocated()

    self.assertGreater(memory_with_grad, memory_without_grad)

    del a
    del y

    # with hooks
    with torch.autograd.graph.save_on_cpu():
        a = torch.ones(1, requires_grad=True, device="cuda")
        y = f(a)

        memory_with_hooks = torch.cuda.memory_allocated()
        self.assertEqual(memory_with_hooks, memory_without_grad)
def test_pynode_destruction_deadlock(self):
    """Destroying a PyNode after an error is raised during backward must
    not deadlock; run in a subprocess so a regression shows as a timeout.

    Fix: the embedded script declared ``forward`` twice — the second
    definition (taking the incoming gradient ``gO``) is the ``backward``
    method; without it, ``Foo`` has no backward and the scenario under
    test (a Python node present in the backward graph) is not set up.
    """
    script = """
import torch

class Foo(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x.clone()

    @staticmethod
    def backward(ctx, gO):
        return gO.clone()

def get_out():
    inp = torch.rand(2, requires_grad=True)

    # The python function is first so that it runs
    # last in the backward pass
    right = Foo.apply(inp)

    # An op that creates new memory
    left1 = inp.clone()
    # An op that saves its input
    left2 = left1 ** 2

    # Inplace modify so that the backward for
    # left2 always raises an error
    left1 += 1

    # An op that takes both side as input.
    # After running, both side's last op will be in
    # the ready queue
    # And the op for left will run first as it was
    # executed last during the forward
    out = left2 + right

    return out

# Nothing should be global variables here as, from what
# I can see, python leaks all the global objects
get_out().sum().backward()

# This used to deadlock when the PyNode is being destroyed after
# the error is raised.
"""
    try:
        subprocess.check_output(
            [sys.executable, '-c', script],
            stderr=subprocess.STDOUT,
            # On Windows, opening the subprocess with the default CWD makes `import torch`
            # fail, so just set CWD to this script's directory
            cwd=os.path.dirname(os.path.realpath(__file__)),
            # It is ok to have an extra long timeout here as a timeout means the test failed
            timeout=20)
    except subprocess.TimeoutExpired as e:
        self.fail(msg="Example code timed out! See the code sample in the test for details.")
    except subprocess.CalledProcessError as e:
        # The subprocess is expected to *fail* with the in-place
        # modification error (not hang); check the message surfaced.
        err_msg = "RuntimeError: one of the variables needed for gradient computation"
        self.assertTrue(err_msg in e.output.decode("utf-8"))
def index_perm_variable(shape, max_indices):
    """Return a LongTensor of `shape` filled with distinct indices drawn
    from a random permutation of range(max_indices)."""
    if not isinstance(shape, tuple):
        shape = (shape,)
    numel = reduce(mul, shape)
    return torch.randperm(max_indices).narrow(0, 0, numel).view(shape)
def bernoulli_scalar():
    """Return a 0-dim uint8 tensor sampled in place with bernoulli_()."""
    scalar = torch.tensor(0, dtype=torch.uint8)
    return scalar.bernoulli_()
class TestAutogradForwardModeBatchedGrad(TestCase):
    """Forward-mode AD under vmap: tangent layout handling for out-of-place
    ops, in-place copies into views, and storage-numel metadata checks."""

    def test_out_of_place_basic(self):
        # Batched forward grads work for simple out-of-place ops.
        a = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        b = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        self.assertTrue(gradcheck(torch.sin, a, check_forward_ad=True, check_batched_grad=True,
                                  check_batched_forward_grad=True))
        self.assertTrue(gradcheck(torch.add, (a, b), check_forward_ad=True, check_batched_grad=True,
                                  check_batched_forward_grad=True))

    def test_out_of_place_not_same_layout(self):
        # Primal is transposed, so the tangent cannot be reused as-is.
        input = torch.zeros([2, 2]).transpose(0, 1)
        tangent = torch.zeros([2, 2, 2])

        def jvp(tangent):
            with fwAD.dual_level():
                x = fwAD.make_dual(input, tangent)
                return fwAD.unpack_dual(x)[1]
        x_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)

        self.assertIsNot(x_tangent, tangent)

    def test_inplace_on_view_same_layout(self):
        input = torch.zeros([2, 2])
        tangent = torch.zeros([2, 2, 2])
        base = torch.zeros([2, 2])
        view = base.view_as(base)

        def jvp(tangent):
            with fwAD.dual_level():
                x = fwAD.make_dual(input, tangent)
                view.copy_(x)
                return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
        x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)

        # With matching layouts the original tangent tensor is shared.
        self.assertFalse(view_tangent._is_view())  # Optimization to share the same tensor!
        self.assertIs(view_tangent, base_tangent)
        self.assertIs(x_tangent, tangent)
        self.assertIs(view_tangent, tangent)

    def test_inplace_on_view_not_same_layout(self):
        input = torch.zeros([2, 2])
        tangent = torch.zeros([2, 2, 2])
        # Destination view has a transposed layout, so tangents are rebuilt.
        view = torch.zeros([2, 2]).transpose(0, 1)

        def jvp(tangent):
            with fwAD.dual_level():
                x = fwAD.make_dual(input, tangent)
                view.copy_(x)
                return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
        x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)

        self.assertIs(view_tangent._base, base_tangent)
        self.assertIs(x_tangent, tangent)
        self.assertIsNot(view_tangent, tangent)

    def test_metadata_check_for_storage_numel_skipped(self):
        # See: test_metadata_check_checks_storage_numel for the reverse of this test
        primal = torch.randn(5)[:4].detach()
        self.assertEqual(len(primal.storage()), 5)
        tangent = torch.randn(10, 4)

        def jvp(tangent):
            with fwAD.dual_level():
                dual = fwAD.make_dual(primal, tangent)
                _, unpacked_tangent = fwAD.unpack_dual(dual)

                # No copy is made
                self.assertIs(tangent, unpacked_tangent)

                # as_strided raises
                with self.assertRaisesRegex(RuntimeError, "can access memory outside of `tensor`"):
                    dual.as_strided((5,), (1,), 0)
            return unpacked_tangent

        torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
class TestAutogradForwardMode(TestCase):
def tearDown(self):
    """Pop any dual levels a failed test left open so later tests start clean."""
    # Ensure that a failing test won't make others fail
    while fwAD._current_level >= 0:
        fwAD.exit_dual_level()

    super().tearDown()
def test_forward_level_cleanup(self):
    """Exiting a dual level must not keep the C++ side of a tangent alive."""
    def get_tensor_and_weak_ref():
        # Create a new Tensor and weak reference
        t = torch.rand(2, requires_grad=True)
        return t, torch._C._WeakTensorRef(t)

    # Sanity check that the helper function works as expected
    t, t_ref = get_tensor_and_weak_ref()
    self.assertFalse(t_ref.expired())

    del t
    self.assertTrue(t_ref.expired())

    # Main test code
    foo = torch.rand(2)

    with fwAD.dual_level():
        tangent, tangent_ref = get_tensor_and_weak_ref()
        self.assertFalse(tangent_ref.expired())

        dual = fwAD.make_dual(foo, tangent)
        self.assertFalse(tangent_ref.expired())

        # Make sure that the tangent we provided has been re-used as is
        self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)

        # Make sure that dual is keeping the tangent alive
        del tangent
        self.assertFalse(tangent_ref.expired())

        # Make sure that the dual level does not keep the c++
        # version of the tangent alive
        del dual
        self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
def test_metadata_check_checks_storage_numel(self):
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(4)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# # Verify that mutating unpacked tangent does not affect the original tangent
tangent_clone = tangent.clone()
unpacked_tangent *= 2
self.assertTrue(torch.allclose(tangent_clone, tangent))
# as_strided runs without error
dual.as_strided((5,), (1,), 0)
def test_metadata_check_checks_ignores_size_zero(self):
a = torch.ones(0).as_strided((0, 1,), (1, 1,), 0)
b = torch.ones(0).as_strided((0, 1,), (1, 0,), 0)
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
torch.diagonal(dual, offset=0)
input = torch.rand([0, 1], dtype=torch.complex128, requires_grad=True)
func = partial(torch.diagonal, offset=0)
torch.autograd.gradcheck(func, (input,), check_forward_ad=True)
def test_metadata_check_when_primal_has_conj_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# conj bit does not materialize. If it materializes it would
# cause the layout check to fail for views that do not index the
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj()
b = torch.rand_like(a)
self.assertTrue(torch.is_conj(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_when_primal_has_neg_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# conj bit does not materialize. If it materializes it would
# cause the layout check to fail for views that do not index the
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj().imag
b = torch.randn(2, 2, dtype=torch.cdouble).imag
self.assertTrue(torch.is_neg(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_check_conj(self):
keys = {
"NEITHER": lambda x: x,
"CONJ": lambda x: x.conj(),
"NEG": lambda x: x._neg_view()
}
for primal_key, tangent_key in product(keys, keys):
x = keys[primal_key](torch.randn(2, 3, 4, dtype=torch.cdouble))
t = keys[tangent_key](torch.randn(2, 3, 4, dtype=torch.cdouble))
if primal_key == tangent_key:
with fwAD.dual_level():
dual = fwAD.make_dual(x, t)
self.assertTrue(fwAD.unpack_dual(dual).tangent is t)
torch.real(dual)
torch.imag(dual)
else:
with fwAD.dual_level():
dual = fwAD.make_dual(x, t)
self.assertTrue(fwAD.unpack_dual(dual).tangent is not t)
torch.real(dual)
torch.imag(dual)
# The following test functions want to ensure all the following behaviors:
# - Ensure that default level system in the python binding works
# - Ensure that only level 0 exists and nesting is properly disabled
# - Ensure that printing works fine
# - Ensure that basic packing/unpacking works
# - Ensure that advanced packing/unpacking works
# - For memory / version counter share
# - For backward AD (regular ops)
# - Ensure that view + inplace for both modes work fine
# - Ensure we do proper cleanup on exit of a level
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object, feel free to relax in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_set_fw_grad_having_own_fw_grad_at_same_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
with self.assertRaisesRegex(RuntimeError, "has a forward gradient at the same level"):
fwAD.make_dual(baz, dual)
def test_make_dual_inference_tensor_in_inference_mode(self):
with torch.inference_mode():
foo = torch.rand(2)
bar = torch.rand(2)
foo_copy = foo.clone()
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertFalse(dual._is_view())
dual += 1
self.assertFalse(torch.allclose(foo, foo_copy))
def test_make_dual_torch_dispatch(self):
counter = [0]
class MySubclass(torch.Tensor):
def __new__(cls, data=None):
return torch.Tensor._make_subclass(cls, data)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if func.overloadpacket == torch.ops.aten.alias:
counter[0] += 1
# Make sure we can re-enable autograd here
with torch.overrides.enable_reentrant_dispatch():
foo = torch.rand(1, requires_grad=True)
self.assertIsNotNone(foo.exp().grad_fn)
with no_dispatch():
return func(*args, **kwargs)
a = torch.tensor(1.)
s = MySubclass(a)
with fwAD.dual_level():
# Only the primal has "alias" called on it
fwAD.make_dual(s, torch.rand_like(s))
self.assertEqual(counter[0], 1)
fwAD.make_dual(torch.rand_like(s), s)
self.assertEqual(counter[0], 1)
def test_make_dual_forbid_integral_dtype(self):
primal_f = torch.ones(2, 2, dtype=torch.float)
primal_l = torch.ones(2, 2, dtype=torch.long)
tangent_f = torch.ones(2, 2, dtype=torch.float)
tangent_l = torch.ones(2, 2, dtype=torch.long)
with fwAD.dual_level():
# Float Primal and Long Tangent
with self.assertRaisesRegex(ValueError, "Expected tangent to be floating point or complex"):
fwAD.make_dual(primal_f, tangent_l)
# Long Primal and Long Tangent
with self.assertRaisesRegex(ValueError, "Expected primal to be floating point or complex"):
fwAD.make_dual(primal_l, tangent_l)
# Long Primal and Float Tangent
with self.assertRaisesRegex(ValueError, "Expected primal to be floating point or complex"):
fwAD.make_dual(primal_l, tangent_f)
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check unpacked dual is returned as a named tuple
# NB: Every invocation of unpack_dual returns a new tensor view
self.assertIsNot(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertEqual(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertIs(baz_tangent, fwAD.unpack_dual(baz).tangent)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
# Also all mentions of "non differentiable view" here means non forward differentiable view
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable view but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates on view should as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_view_inplace_always_creates_a_view(self):
# See https://github.com/pytorch/pytorch/issues/67800
# The codepath may depend on the op. At the time writing, when self is not a dual tensor
# the resulting forward grad for self for...
# - add_ has the same layout as self
# - mul_ has the same layout as other
# This is kind of fragile because the above depends on how the forward grad expression
# is written. For add and mul at least, the output inherits the layout of LHS.
# We want to handle at least these two cases.
inplace_binary_ops = ( # Add more to this list?
lambda x, y: x.add_(y),
lambda x, y: x.mul_(y),
lambda x, y: x.copy_(y),
)
for inplace_binary_op in inplace_binary_ops:
base = torch.randn(2, 2)
view = base.transpose(0, 1)
primal = torch.randn(2, 2)
tangent = torch.randn(2, 2)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
inplace_binary_op(view, dual)
# Verify that a view relationship is created for both the primal and tangent
p, t = fwAD.unpack_dual(base)
p_clone = p.clone()
t_clone = t.clone()
view *= 2
p, t = fwAD.unpack_dual(base)
self.assertTrue(torch.allclose(p_clone * 2, p))
self.assertTrue(torch.allclose(t_clone * 2, t))
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
def test_non_differentiable(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
# No differentiable outputs, shouldn't error
eq = foo == bar
# Inplace
foo.eq_(bar)
def test_create_new_zeros_with_same_meta(self):
new_zeroes_fn = torch.ops.aten._new_zeros_with_same_feature_meta
def check(a, b):
def assert_same_meta(t, target):
for num_bdim in range(t.dim()):
result = new_zeroes_fn(t, target, self_num_batch_dims=num_bdim)
self.assertEqual(result.dim(), target.dim() + num_bdim)
# Check size/strides match for feature dims only
for i in range(num_bdim, result.dim()):
self.assertEqual(result.size()[i], target.size()[i - num_bdim])
self.assertEqual(result.stride()[i], target.stride()[i - num_bdim])
# Check that we generate strides reasonably
if target.is_contiguous():
self.assertTrue(result.is_contiguous())
self.assertEqual(result.storage_offset(), target.storage_offset())
prod_of_t_bdims = reduce(operator.mul, t.size()[:num_bdim], 1)
self.assertEqual(len(result.storage()), len(target.storage()) * prod_of_t_bdims)
# TensorOptions is same
self.assertEqual(result.dtype, target.dtype)
assert_same_meta(a, b)
assert_same_meta(b, a)
a = torch.randn(5, dtype=torch.float)
b = torch.randn(2, 3, 4, dtype=torch.double)
check(a, b)
# non-contiguous case
a = torch.randn(2, 3, 4).transpose(0, 1).contiguous().transpose(0, 1)
b = torch.randn(2, 3, 4)
check(a, b)
a = torch.randn(5).narrow(0, 1, 2)
b = torch.randn(2)
check(a, b)
# tensor is not a view, but still does not index entirety of storage
a = torch.randn(5).resize_(4)
b = torch.randn(4)
check(a, b)
# Zero-numel tensors
a = torch.randn(1, 0, 2)
b = torch.randn(1, 2)
check(a, b)
# Scalar tensor
a = torch.tensor(1.)
b = torch.randn(1, 2)
check(a, b)
def test_backward_graph_destruction(self):
def fn():
a = torch.rand(10, requires_grad=True)
da = fwAD.make_dual(torch.rand_like(a), a)
# Create an object with a c++ cycle as:
# db -> AutogradMeta -> ForwardGrad -> db's grad
# db's grad -> AutogradMeta -> MulBackward
# MulBackward -> SavedVariable -> db
db = da.exp()
with fwAD.dual_level():
fn()
# This test make sure that we don't deadlock on exit of this
# context manager. If you do, there is something wrong with the
# locking of the forward ad level most likely
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
    def test_min_max_median_backprops_to_all_values(self, device):
        """min/max/median/nanmedian split the gradient evenly across tied extrema."""
        for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
            x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
            # NOTE(review): x2 is created without device=device, unlike x1 —
            # presumably intentional (all-nan case exercised on CPU), but confirm
            # this is not a missing `device=` argument.
            x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
            for x in [x1, x2]:
                y = f(x)
                y.backward()
                # Three tied values each receive 1/3 of the gradient
                self.assertEqual(x.grad.sum(), 1.)
                self.assertEqual((x.grad == 1 / 3).sum(), 3)
    def test_scatter_index_reduce_amin_amax_backprops_to_all_values(self, device):
        """scatter_reduce/index_reduce with amin/amax distribute gradients evenly
        over tied extrema (checked via gradcheck)."""
        # tests that gradients are evenly distributed when there are multiple max/min values
        # tested here instead of adding a SampleInput as the backward for this case is non-differentiable for gradgrad
        # as is the case for test_min_max_median_backprops_to_all_values above
        fns = (torch.scatter_reduce, torch.index_reduce)
        reduces = ('amin', 'amax')
        for fn, reduction in product(fns, reduces):
            input = torch.randn((2, 3), device=device, dtype=torch.float64, requires_grad=True)
            src = input.clone().detach_().requires_grad_(True)
            idx = torch.arange(2).to(dtype=torch.long, device=device)
            if fn == torch.scatter_reduce:
                # scatter_reduce needs an index the same shape as src
                idx = idx.unsqueeze(-1).expand((2, 3))
            gradcheck(fn, (input, 0, idx, src, reduction), check_batched_grad=False)
    def test_scatter_index_reduce_prod_gradgrad_error(self, device):
        """Double backward of scatter_reduce/index_reduce with 'prod' raises when
        two zeros in src reduce into the same output element."""
        # test that double backward raises an error for the case where 2 zeros in src
        # are scattered to the same position in self
        input = torch.tensor([1.], device=device, dtype=torch.float64, requires_grad=True)
        src = torch.tensor([0., 0.], device=device, dtype=torch.float64, requires_grad=True)
        idx = torch.tensor([0, 0], device=device, dtype=torch.long)
        for fn in (torch.scatter_reduce, torch.index_reduce):
            # check that this case passes on gradcheck
            gradcheck(fn, (input, 0, idx, src, 'prod'), check_batched_grad=False)
            with self.assertRaisesRegex(RuntimeError, "Double backward is unsupported for"):
                gradgradcheck(fn, (input, 0, idx, src, 'prod'))
    @skipIfMps  # the test doesn't work on MPS as double types are not supported
    def test_parameter_resize(self, device):
        """Shrinking a Parameter's storage via set_ must not break backward."""
        asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
        for i in range(2):
            with torch.no_grad():
                # Re-point the parameter at a smaller slice of itself
                asd.set_(asd[1:])
                asd.grad = None
            m = torch.cat((asd, asd))
            m.sum().backward()
    @skipIfMps  # the test doesn't work on MPS as double types are not supported
    @dtypes(torch.double, torch.cdouble)
    def test_sparse_ctor_getter_backward(self, device, dtype):
        """Gradcheck a sparse_coo_tensor construct/coalesce/values round trip, and
        check that _values() is non-differentiable."""
        # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
        def _test(size, sparse_dim, nnz, device):
            v_size = [nnz] + list(size[sparse_dim:])
            i = torch.rand(sparse_dim, nnz)
            # Scale random indices into the valid range for each sparse dim
            i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
            i = i.to(torch.long)
            inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
            other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
                                         dtype=dtype)[0]
            def fn(v):
                x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
                y = (x + other).coalesce()
                yv = y.values()
                new_v = yv.tanh()
                z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
                return z.coalesce().values()
            gradcheck(fn, (inp,), check_batched_grad=False)
            # FIXME: make gradgradcheck work.
            # gradgradcheck(fn, (inp,), check_batched_grad=False)
            # assert that _values is non-differentiable
            with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
                other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
        # Exercise all combinations of empty indices/values/nnz
        for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
            sparse_size = [] if empty_i else [2, 1]
            dense_size = [1, 0, 2] if empty_v else [1, 2]
            nnz = 0 if empty_nnz else 5
            _test(sparse_size + dense_size, len(sparse_size), nnz, device)
    @skipMeta
    @skipIfMps
    @dtypes(torch.double, torch.cdouble)
    def test_sparse_backward(self, device, dtype):
        """Sparse and dense gradients accumulate correctly in any arrival order."""
        class FixedGradientFunction(Function):
            # Identity forward; backward returns a pre-chosen gradient tensor
            @staticmethod
            def forward(ctx, x, grad_x):
                ctx.save_for_backward(grad_x)
                return x
            @staticmethod
            def backward(ctx, grad_x):
                saved_grad_x, = ctx.saved_tensors
                return saved_grad_x, None
        size = torch.Size([6, 3, 2])
        i1 = torch.tensor([
            [0, 3, 4],
            [0, 2, 2],
        ], dtype=torch.long)
        v1 = make_tensor([3, 2], dtype=dtype, device=device)
        sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
        i2 = torch.tensor([
            [0, 1, 3, 4],
            [0, 1, 2, 2],
        ], dtype=torch.long)
        v2 = make_tensor([4, 2], dtype=dtype, device=device)
        sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
        dense_grad = torch.rand(size, device=device, dtype=dtype)
        fn = FixedGradientFunction
        # sparse first
        x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
        # dense first
        x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
        # sparse only
        x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
        (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
        self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
    # autograd tests via common_method_invocations don't allow input tensors to
    # be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
    # check_sparse_nnz is set to False.)
    @skipIfMps
    def test_sparse_mask_autograd(self, device):
        """sparse_mask followed by to_dense backprops the mask pattern as gradient."""
        tensor = torch.randn(3, requires_grad=True, device=device)
        mask = torch.ones(3, device=device)
        mask[1] = 0
        mask = mask.to_sparse()
        converted = tensor.sparse_mask(mask).to_dense()
        converted.sum().backward()
        # Gradient is 1 where the mask kept a value, 0 where it dropped one
        self.assertEqual(tensor.grad, mask.to_dense())
    @skipIfMps  # the test doesn't work on MPS as double types are not supported
    def test_pyscalar_conversions(self, device):
        """int()/float()/bool() conversions of 1-element tensors behave like
        Python scalars, including nan/inf edge cases and overflow errors."""
        def _test_pyscalar_conversions(t, integral_conv):
            # integral -> integral
            l = t(torch.zeros(1, 1, 1, dtype=torch.long))
            pyscalar = -12345
            l[0] = pyscalar
            self.assertEqual(integral_conv(l), pyscalar)
            # floating point -> floating point
            f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
            pyscalar = -12345.1
            f[0] = pyscalar
            self.assertEqual(float(f), pyscalar)
            f[0] = nan
            self.assertTrue(math.isnan(float(f)))
            f[0] = inf
            self.assertEqual(float(f), inf)
            f[0] = -inf
            self.assertEqual(float(f), -inf)
            # integral -> floating point
            # check we can convert something that loses precision
            pyscalar = 1234567890123456789
            self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
            l[0] = pyscalar
            self.assertEqual(float(l), float(pyscalar))
            # floating point -> integral
            f[0] = nan
            self.assertRaises(ValueError, lambda: integral_conv(f[0]))
            f[0] = inf
            self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
            f[0] = -inf
            self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
            f[0] = sys.float_info.max
            self.assertEqual(integral_conv(f), sys.float_info.max)
            # bool, nonzero
            def test_nonzero(tensor, value, expected):
                tensor[0] = value
                self.assertEqual(expected, bool(tensor))
                self.assertEqual(expected, True if tensor else False)
            test_nonzero(l, 0, False)
            test_nonzero(l, -2, True)
            test_nonzero(f, 0.0, False)
            test_nonzero(f, sys.float_info.min, True)
            test_nonzero(f, nan, bool(nan))
            test_nonzero(f, inf, bool(inf))
            test_nonzero(f, -inf, bool(-inf))
        _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
    @dtypesIfMPS(torch.float32)
    @dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    @dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_set_requires_grad_only_for_floats(self, device, dtype):
        """requires_grad can only be enabled on floating-point tensors; disabling
        it is always allowed."""
        def f1():
            a = torch.ones(1, dtype=dtype, device=device)
            a.requires_grad_()
        def f2():
            a = torch.ones(1, dtype=dtype, device=device)
            a.requires_grad = True
        def f3():
            torch.ones(1, dtype=dtype, device=device, requires_grad=True)
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad = False  # should always work
        a.requires_grad_(False)
        for f in [f1, f2, f3]:
            if dtype.is_floating_point:
                f()
            else:
                with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
                    f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
    def test_advanced_indexing_backwards_memory_format(self, device):
        """Backward through advanced indexing with a channels_last index tensor
        must not crash."""
        # See https://github.com/pytorch/pytorch/issues/36956
        shape = (2, 8, 1, 2)
        i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
        x = torch.randn(shape, requires_grad=True, device=device)
        x[i].sum().backward()
    def _test_reentrant_parent_error_on_cpu(self, device):
        """Helper: a CPU parent graph that errors must stop a longer reentrant
        child graph on `device` before any gradients are accumulated."""
        t1 = torch.rand([3, 3], requires_grad=True)
        t2 = torch.rand([3, 3], device=device, requires_grad=True)
        t3 = torch.rand([3, 3], device=device, requires_grad=True)
        # Parent graph cpu graph.
        t4 = t1 * t1
        t5 = TestAutograd.SimulateBackwardError.apply(t4)
        # Child gpu graph (much longer than parent graph).
        prev = t2 * t2
        for i in range(10):
            prev = prev * t2
        reentrant_root = prev
        class ReentrantFunc(Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.clone()
            @staticmethod
            def backward(ctx, grad):
                # Reentrant backward in child will take much longer.
                reentrant_root.backward()
                return grad
        # Parent gpu graph.
        t6 = ReentrantFunc.apply(t3)
        t7 = t6 * t6
        # Parent graph will error out first, while child graph will continue executing.
        with self.assertRaisesRegex(Exception, "Simulate error"):
            torch.autograd.backward([t5.sum(), t7.sum()])
        # No grads should be accumulated since child graph will stop execution
        # after parent receives error.
        self.assertIsNone(t2.grad)
        self.assertIsNone(t1.grad)
        self.assertIsNone(t3.grad)
    @onlyCUDA
    def test_reentrant_parent_error_on_cpu(self, device):
        """The failed reentrant backward must release all CUDA memory it used."""
        def _get_cuda_memory_usage():
            # we don't need CUDA synchronize because the statistics are not tracked at
            # actual freeing, but at when marking the block as free.
            num_devices = torch.cuda.device_count()
            gc.collect()
            return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
        before = _get_cuda_memory_usage()
        # Run as separate function so that gc can clean up everything when we
        # check for memory usage.
        self._test_reentrant_parent_error_on_cpu(device)
        # Wait for autograd thread to cleanup failed tasks.
        after = _get_cuda_memory_usage()
        start = time.time()
        # Poll up to 30s for the autograd engine's async cleanup to finish
        while before != after and time.time() - start < 30:
            time.sleep(0.1)
            after = _get_cuda_memory_usage()
        self.assertEqual(before, after)
    @skipIfMps  # the test doesn't work on MPS
    # TODO: see if these tests can be ported to OpInfos or moved to where's test suite
    def test_where_functional(self, device):
        """gradcheck/gradgradcheck torch.where with matching and broadcasting shapes."""
        x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
        y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
        cond = mask_not_all_zeros((5, 5)).to(device=device)
        def where(cond, x, y):
            return torch.where(cond, x, y)
        gradcheck(where, [cond, x, y], raise_exception=True)
        gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
        # Broadcasting case
        x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
        y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
        gradcheck(where, [cond, x, y], raise_exception=True)
        gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
    @skipIfMps  # the test doesn't work on MPS
    def test_where_scalar(self, device):
        """gradcheck torch.where with a Python scalar in either branch."""
        x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
        scalar = 4.
        cond = mask_not_all_zeros((5, 5)).to(device=device)
        def where_scalar_first(cond, x):
            return torch.where(cond, scalar, x)
        def where_scalar_second(cond, x):
            return torch.where(cond, x, scalar)
        gradcheck(where_scalar_first, (cond, x))
        gradgradcheck(where_scalar_first, (cond, x))
        gradcheck(where_scalar_second, (cond, x))
        gradgradcheck(where_scalar_second, (cond, x))
    @onlyCUDA
    def test_free_unneeded_tensor(self, device):
        """Intermediate results not needed for backward must not stay allocated."""
        x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
        m = torch.randn(1, 3, 1, 1, device=device)
        z = x.sum()
        base_mem = torch.cuda.memory_allocated()
        z = ((x + 2) * m).sum()
        end_mem = torch.cuda.memory_allocated()
        # In the end the memory usage should remain equal, because neither of
        # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
        # previous allocation of z had the same size as the current one.
        self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
    @skipCUDAIfRocm
    @onlyCUDA
    def test_profiler_emit_nvtx(self, device):
        """Smoke test: constructing/entering emit_nvtx must not raise."""
        # This test is not intended to ensure correctness of nvtx ranges.
        # That would require something a great deal more complex (you'd have to create a
        # profile in a subprocess, open it, and parse the sql somehow).
        # This test is merely intended to catch if emit_nvtx breaks on construction.
        a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
        with torch.cuda.profiler.profile():
            with emit_nvtx():
                a.add(1.0)
    @onlyCUDA
    def test_rnn_backward_to_input_but_not_parameters(self, device):
        """LSTM backward can flow to the input even when all weights are frozen."""
        # this checks whether it is possible to not require
        # weight parameters, but require inputs, see #7722
        l = torch.nn.LSTM(2, 3).to(device)
        for p in l.parameters():
            p.requires_grad = False
        s = torch.randn(1, 1, 2, requires_grad=True, device=device)
        out, _ = l(s)
        out.sum().backward()
        # Input grad must exist and be nonzero
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@skipIfMps  # the test doesn't work as randn is not supported with type long
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
    """Assigning to ``.grad`` must validate type, shape, dtype and device."""
    x = torch.randn(5, 5, device=devices[0])

    # Tests that the wrong type raises
    with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
        x.grad = 0

    # Tests that the wrong shape raises
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(2, 2, device=devices[0])

    # Tests that the wrong dtype raises
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])

    # Tests that self-assignment raises
    with self.assertRaises(RuntimeError):
        x.grad = x

    # Tests device -> cpu grad assignment raises
    if self.device_type != 'cpu':
        with self.assertRaises(RuntimeError):
            t_cpu = torch.rand(5, 5)
            t_cpu.grad = torch.randn(5, 5, device=devices[0])

    # Tests half type on CUDA
    if self.device_type == 'cuda':
        x = x.to(dtype=torch.half, device=devices[0])
        x.grad = torch.zeros_like(x)

    # Tests cross-device assignment raises
    if len(devices) > 1:
        x = torch.randn(5, 5, device=devices[0])
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(5, 5, device=devices[1])
@dtypesIfMPS(torch.float32)
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
    """*_like factories must honor requires_grad, dtype and device arguments."""
    fns = [torch.ones_like, torch.randn_like]
    x = torch.randn(2, 3, dtype=dtype, device=devices[0])

    for fn in fns:
        for requires_grad in [True, False]:
            output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
            self.assertEqual(requires_grad, output.requires_grad)
            self.assertIs(dtype, output.dtype)
            # Bug fix: previously this checked ``x.device`` (the input),
            # which is trivially true by construction; the factory *output*
            # is what must land on the requested device.
            self.assertEqual(devices[0], str(output.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
    """Grad must flow back even when only one Broadcast replica is used."""
    from torch.nn.parallel._functions import Broadcast
    x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
    outputs = Broadcast.apply(list(range(len(devices))), x)
    # Only the last replica participates in the loss; the others are unused.
    y = outputs[-1] * 2
    y.sum().backward()
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
    # check that current device matches the variable's device
    device = [None]

    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone()

        @staticmethod
        def backward(ctx, grad_output):
            # Record which device the incoming gradient lives on so the
            # outer test can assert it matches the input's device.
            device[0] = grad_output.device
            return grad_output.clone()

    v = torch.randn(1, device=devices[1], requires_grad=True)
    Identity.apply(v).backward()
    self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
    # Two gradients arriving from a second device must accumulate into the
    # same input buffer without error.
    leaf = torch.randn(1, device=devices[0], requires_grad=True)
    moved = leaf.to(device=devices[1])
    other = leaf.to(device=devices[1])
    (moved + other).backward()
@onlyCPU
def test_copy_(self, device):
    """copy_ into any floating dtype (incl. bfloat16) must propagate requires_grad."""
    # At the time of writing this test, copy_ is not generated from native_functions.yaml
    # there was a bug that bfloat16 was not recognized as floating.
    x = torch.randn(10, device=device, requires_grad=True)
    floating_dt = floating_types_and(torch.half, torch.bfloat16)
    for dt in floating_dt:
        y = torch.empty(10, device=device, dtype=dt)
        y.copy_(x)
        self.assertTrue(y.requires_grad)
        z = x.to(torch.bfloat16)
        self.assertTrue(z.requires_grad)
def test_copy_forward_ad_broadcasting(self, device):
    """Forward AD through copy_ must handle a broadcastable (smaller) src."""
    # copy_ allows the src to have a different shape from self as long as src is
    # broadcastable to self. Make sure forward AD handles this case.
    primal = torch.rand(3, 3, device=device)
    tangent = torch.rand(3, 3, device=device)
    non_dual = torch.rand(1, 3, 3, device=device)
    with fwAD.dual_level():
        dual = fwAD.make_dual(primal, tangent)
        non_dual.copy_(dual)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
    """Reentrant backward must work when the inner graph lives on another device."""
    class ReentrantFunc(Function):
        # Class-level switch: choose whether the reentrant backward builds
        # its inner graph on the CPU or on ``device``.
        _cpu_mode = True

        @staticmethod
        def forward(ctx, x):
            return x * (x + 2)

        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                if ReentrantFunc._cpu_mode:
                    new_param = torch.randn(2, 2, requires_grad=True)
                    (new_param ** 2).sum().backward()
                else:
                    new_param = torch.randn(2, 2, device=device, requires_grad=True)
                    (new_param ** 2).sum().backward()
            return grad_output

    # Reentrant starts on GPU thread, finishes on GPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on CPU thread, finishes on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    # set ReentrantFunc node to GPU to emit tasks to GPU queue
    ReentrantFunc._cpu_mode = False
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on GPU thread, finishes on CPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    # set ReentrantFunc node to CPU to emit tasks to CPU queue
    ReentrantFunc._cpu_mode = True
    out = ReentrantFunc.apply(x)
    out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
    """Regression test: an empty NodeTask sent cross-device must not crash."""
    # Output on gpu so that this task will be associated with the gpu thread
    def fn_on_gpu(inp):
        # Artificially increase the priority of the next op to make sure it runs
        # as soon as we reach it before the ops of branch1.
        dummy = inp * 2 * 2 * 2 * 2
        return inp.to(device=device)

    def parent_on_cpu(inp):
        # Slow branch of ops on gpu so that the work queue for the gpu thread
        # won't empty too quickly. They also have smaller priorities than the
        # ones created by fn_on_gpu
        branch1 = inp.to(device=device)
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
        # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
        # So the cpu thread will notify the gpu thread with an empty NodeTask.
        branch2 = checkpoint(fn_on_gpu, inp)
        out = branch2 + branch1
        return out

    inp = torch.rand(2, requires_grad=True)
    out = parent_on_cpu(inp)
    # This will segfault if the empty NodeTask is not handled properly in the
    # gpu thread ReadyQueue
    out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
    # In-place update through a view, then backprop through the base.
    leaf = torch.randn(2, 2, device=device, requires_grad=True)
    base = leaf.clone()
    view = base.narrow(0, 0, 1)
    view.mul_(2)
    base.sum().backward()
    self.assertEqual(leaf.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
    # Two views over the same slice: an in-place change through one must be
    # reflected when backpropping through the other.
    leaf = torch.randn(2, 2, device=device, requires_grad=True)
    base = leaf.clone()
    first = base.narrow(0, 0, 1)
    second = base.narrow(0, 0, 1)
    first.mul_(2)
    second.sum().backward()
    self.assertEqual(leaf.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
    # Mutate a view-of-a-view, then backprop through the base.
    leaf = torch.randn(2, 2, device=device, requires_grad=True)
    base = leaf.clone()
    row = base.narrow(0, 0, 1)
    cell = row.narrow(1, 1, 1)
    cell.mul_(2)
    base.sum().backward()
    self.assertEqual(leaf.grad.tolist(), [[1, 2], [1, 1]])
@skipIfMps  # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_then_no_grad(self, device):
    """grad_fn rebase under no_grad after in-place on a non-leaf view must not break backward."""
    # Perform an in-place operation on a view of a non-leaf variable.
    a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
    b = a * 2
    c = b.view_as(b)
    c[0][0] = 3

    # Force a graph update with grad disabled.
    with torch.no_grad():
        c.grad_fn

    c.sum().backward()
@skipIfMps  # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_gradcheck(self, device):
    # gradcheck modifications to views
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        # Clone so the in-place mul_ hits views of a fresh base each call.
        x = root.clone()
        x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
        x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
    """In-place ops on an output of a multi-output view op (unbind) must raise."""
    # NOTE(review): ``device`` is unused — ``root`` is always created on the
    # CPU. Presumably deliberate (double is unsupported on MPS); confirm.
    root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
    x = root.clone()
    v1 = x.unbind()
    with self.assertRaises(RuntimeError):
        v1[0].mul_(2)
@skipIfMps  # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_of_multiple_output_view(self, device):
    # A view taken from one output of a multi-output view op may not be
    # modified in place.
    base = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    pieces = base.unbind(0)
    alias = pieces[0].view_as(pieces[0])
    with self.assertRaises(RuntimeError):
        alias.mul_(2)
@skipIfMps  # MPS backend doesn't support double types
def test_inplace_multiple_output_view_of_view(self, device):
    # A multi-output view op applied to an existing view also yields outputs
    # that may not be modified in place.
    base = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    alias = base.view_as(base)
    pieces = alias.unbind(0)
    with self.assertRaises(RuntimeError):
        pieces[0].mul_(2)
@skipIfMps  # MPS backend doesn't support double types
def test_inplace_on_view_makes_base_require_grad(self, device):
    # in-place modification to view makes base require grad
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
    b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        x = root.clone()
        self.assertFalse(x.requires_grad)
        # Multiplying a view by a requires-grad tensor must flip the base.
        x.narrow(1, 2, 2).mul_(b)
        self.assertTrue(x.requires_grad)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
    # In-place multiply on a narrow view, then backprop through the view.
    base = torch.tensor([2., 5.], device=device, requires_grad=False)
    factor = torch.tensor([3.], device=device, requires_grad=True)
    res = base.narrow(0, 1, 1).mul_(factor)
    res.sum().backward()
    self.assertEqual(factor.grad.tolist(), [5])
    self.assertIsNone(base.grad)
@skipIfMps  # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_modify_base(self, device):
    # Test that an in-place operation on a base that forced it to require
    # grad also forces any previous views to require grad and backprop
    # correctly
    r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)

    def fn(r):
        x = torch.ones(5, dtype=torch.double, device=device)
        v = x.select(0, 1)
        self.assertFalse(v.requires_grad)
        self.assertIsNone(v.grad_fn)
        x.add_(r)  # v is now dependent on r due to the in-place op on x
        self.assertTrue(v.requires_grad)
        return v

    gradcheck(fn, [r])
    gradgradcheck(fn, [r])
@skipIfMps  # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_python(self, device):
    # in-place modifications of Python-autograd created view
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    class PyAdd(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            # mark_dirty tells autograd the input was mutated in place.
            ctx.mark_dirty(x)
            x.add_(y)
            return x

        @staticmethod
        def backward(ctx, grad):
            return grad, grad

    def func(root, b):
        x = root.clone()
        PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
        PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
    # Same as the contiguous cases above, but the leaf itself is a
    # non-contiguous view chain (select + transpose).
    leaf = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
    base = leaf.clone()
    outer = base.narrow(0, 0, 1)
    inner = outer.narrow(1, 1, 1)
    inner.mul_(2)
    base.sum().backward()
    self.assertEqual(leaf.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
    # The unsafe_* variants explicitly permit in-place ops on their outputs.
    unsafe_fns = (
        lambda t: t.unsafe_split(1),
        lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
        lambda t: t.unsafe_chunk(3),
    )
    for fn in unsafe_fns:
        leaf = torch.randn(3, 3, device=device, requires_grad=True)
        b = leaf + leaf
        s1, s2, s3 = fn(b)
        s1.mul_(s2)
        s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
    # The safe multi-output view ops must reject in-place modification of
    # their outputs with an explanatory error message.
    safe_fns = (
        lambda t: t.split(1),
        lambda t: t.split_with_sizes((1, 1, 1)),
        lambda t: t.chunk(3),
    )
    for fn in safe_fns:
        leaf = torch.randn(3, 3, device=device, requires_grad=True)
        b = leaf + leaf
        s1, s2, s3 = fn(b)
        error_msg = 'This view is the output of a function that returns multiple views.'
        with self.assertRaisesRegex(RuntimeError, error_msg):
            s1.mul_(s2)
@skipIfMps  # the test doesn't work on MPS as double types are not supported
def test_mv_grad_stride_0(self, device):
    """mv gradient must be correct when the vector arg is a stride-0 expand.

    Reference: https://github.com/pytorch/pytorch/issues/38315
    """
    mat = torch.randn(2, 2, dtype=torch.double, device=device)
    vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)

    def fn(vec):
        # Expand inside the function to make sure the input to
        # gradcheck does not have overlapping memory
        vec = vec.expand(2)
        return (mat @ vec).sum()

    # Bug fix: ``(vec)`` is not a tuple — it is just ``vec``. gradcheck
    # happens to accept a bare tensor, but the intended 1-tuple needs a
    # trailing comma.
    gradcheck(fn, (vec,))
    gradgradcheck(fn, (vec,))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
    # gradcheck must handle functions whose output lives on a different
    # device than the input, in both directions.
    for src, dst in (("cuda", "cpu"), ("cpu", "cuda")):
        t = torch.ones((1,), dtype=torch.double, device=src, requires_grad=True)
        gradcheck(lambda t: t.to(dst), (t,))
def test_strided_leaf_grad_layout(self, device):
    """Leaf ``.grad`` layout: match the leaf when it is dense, else rowmajor."""
    # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
    for fmt_a in (torch.contiguous_format, torch.channels_last):
        for fmt_b in (torch.contiguous_format, torch.channels_last):
            a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
            b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
            a.requires_grad_()
            b.requires_grad_()
            # checks (1) for broadcasted gradients
            a.sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            b.sum().backward()
            self.assertEqual(b.grad.stride(), b.stride())
            # checks (1) for non-broadcasted gradients
            a.grad = None
            b.grad = None
            (a * b).sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            self.assertEqual(b.grad.stride(), b.stride())

    # (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
    c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
    c.requires_grad_()
    d = torch.rand((2, 2), device=device)
    # checks (2) for broadcasted gradients
    c.sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
    # checks (2) for non-broadcasted gradients
    c.grad = None
    (c * d).sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
@skipIfMps
def test_copy_r_to_c(self, device):
    """Copying a real tensor into a complex one must be differentiable and warn-free."""
    out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
    inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
                        requires_grad=True)

    def do_test():
        out_c.copy_(inp_r)
        out_c.sum().backward()
        self.assertEqual(inp_r.grad, torch.ones_like(inp_r))

    self.assertNotWarn(do_test)
def test_to_r_to_c(self, device):
    # Casting real -> complex via .to must stay differentiable and must not
    # warn; the gradient flowing back to the real input is all ones.
    def do_test():
        real = torch.randn(3, 2, dtype=torch.double, device=device,
                           requires_grad=True)
        complex_out = real.to(torch.complex128)
        complex_out.sum().backward()
        self.assertEqual(real.grad, torch.ones_like(real))

    self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
    """Ops with non-differentiable outputs must not set requires_grad."""
    # Just make sure the op doesn't raise an error
    # and resulting tensor has requires_grad=False.
    x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
    out = torch.isin(x, torch.tensor([2, 3], device=device))
    self.assertFalse(out.requires_grad)

    # Consistency fix: allocate on the device under test like the tensor
    # above (previously this half of the test always ran on the CPU).
    x = torch.randn(3, 3, requires_grad=True, device=device)
    out = torch.signbit(x)
    self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
    """Warnings raised during backward must surface as Python warnings."""
    # Test warning during backward are always propagated as python warnings (gh-50209)
    # NOTE: For device=cuda, warning gets propagated from a worker thread
    a = torch.zeros((), device=device, requires_grad=True)
    b = torch._C._nn._test_warn_in_autograd(a)
    with self.assertWarnsRegex(UserWarning, "Warn from backward"):
        b.backward()
class TestAutogradInferenceMode(TestCase):
    """Tests for torch.inference_mode: inference-tensor creation, and the
    interaction of inference and normal tensors under functional, in-place
    and view ops, including the errors raised on rebase/backward."""

    def _is_inference_tensor(self, tensor):
        # True iff accessing ``tensor._version`` raises the "no version
        # counter" error that only inference tensors produce.
        try:
            err_msg = "Inference tensors do not track version counter"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                tensor._version
            return True
        except AssertionError as e:
            return False

    def test_inference_mode_context_manager(self):
        """The context manager must nest and restore the previous state."""
        self.assertFalse(torch.is_inference_mode_enabled())
        with torch.inference_mode():
            self.assertTrue(torch.is_inference_mode_enabled())
            with torch.inference_mode(False):
                self.assertFalse(torch.is_inference_mode_enabled())
            self.assertTrue(torch.is_inference_mode_enabled())
        self.assertFalse(torch.is_inference_mode_enabled())

    def test_inference_mode_decorator(self):
        """inference_mode(mode) used as a decorator must honor the flag."""
        for mode in (True, False):
            @torch.inference_mode(mode)
            def func(x):
                self.assertEqual(torch.is_inference_mode_enabled(), mode)
                return x * x

            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)
                d = func(c)
                self.assertTrue(not mode or torch.is_inference(d))
                self.assertEqual(d.requires_grad, requires_grad and not mode)

    def test_inference_mode_tensor_creation(self):
        with torch.inference_mode():
            # new tensors created through constructors are inference tensors
            c = torch.ones(1, 2, 3)
            self.assertFalse(c.requires_grad)
            self.assertTrue(torch.is_inference(c))

            # requires_grad doesn't change inference tensor behavior in InferenceMode
            tmp = torch.ones(1, 2, 3, requires_grad=True)
            self.assertTrue(tmp.requires_grad)
            self.assertTrue(torch.is_inference(tmp))

            tmp = torch.ones(1, 2, 3).requires_grad_(False)
            self.assertFalse(tmp.requires_grad)
            self.assertTrue(torch.is_inference(tmp))

    def test_inference_mode_existing_autograd_session(self):
        s = torch.ones(1, 2, 3, requires_grad=True)
        a = s.clone()

        # `a` gets saved outside of inference mode
        out = a * a
        with torch.inference_mode():
            a.add_(2)

        self.assertFalse(torch.is_inference(a))
        # tensors created outside of inference mode aren't
        # inference tensors, so they will still have their
        # version counters tracked
        err_msg = ("one of the variables needed for gradient computation has been "
                   "modified by an inplace operation")
        with self.assertRaisesRegex(RuntimeError, err_msg):
            out.backward(torch.ones_like(out))

    def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
        def functional_op(x):
            return x * x

        with torch.inference_mode():
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # performing a non-view operation produces a inference tensor
                # that does not require grad
                func_out = functional_op(c)
                self.assertTrue(torch.is_inference(func_out))
                self.assertFalse(func_out.requires_grad)

    def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
        @torch.inference_mode()
        def run_test(fn):
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # after performing inplace operation, tensor is still
                # an inference tensor
                fn(c)
                self.assertTrue(torch.is_inference(c))
                self.assertEqual(c.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
        with torch.inference_mode():
            for requires_grad in (True, False):
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # perform view operation produces inference tensor
                # that does not require grad
                view_out = c.view(-1)
                self.assertTrue(torch.is_inference(view_out))
                self.assertFalse(view_out.requires_grad)

    def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
        def functional_op(x):
            return x * x

        for requires_grad in (True, False):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            func_out = functional_op(c)
            self.assertFalse(torch.is_inference(func_out))
            self.assertFalse(func_out.requires_grad)
            self.assertTrue(func_out.is_leaf)

    def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
        def run_test(fn):
            for requires_grad in (False, True):
                with torch.inference_mode():
                    c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                if requires_grad:
                    # leaf variable that requires grad is being used in an inplace
                    # operation when requires_grad=True
                    # NOTE(review): this branch intentionally asserts nothing.
                    pass
                else:
                    err_msg = "Inplace update to inference tensor outside InferenceMode"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(c)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
        for requires_grad in (True, False):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            out = c.view(-1)
            self.assertTrue(torch.is_inference(out))
            self.assertFalse(out.requires_grad)
            self.assertFalse(out._is_view())
            self.assertTrue(out.is_leaf)

    def test_normal_tensor_inplace_output_in_inference_mode(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                    # inplace -> inplace
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                    # inplace -> inplace -> view
                    view_out = a.view(-1)
                    self.assertFalse(torch.is_inference(view_out))
                    self.assertEqual(view_out.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_normal_tensor_inplace_output_in_normal_mode(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    fn(a)
                    self.assertFalse(torch.is_inference(a))
                    self.assertEqual(a.requires_grad, requires_grad)

                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace -> view
                view_out = a.view(-1)
                self.assertFalse(torch.is_inference(view_out))
                self.assertEqual(view_out.requires_grad, requires_grad)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_normal_tensor_view_output_in_inference_mode(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                out = a.view(-1)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                self.assertTrue(out._is_view())

                # view -> view
                tmp = out.view(-1)
                self.assertFalse(torch.is_inference(tmp))
                self.assertEqual(tmp.requires_grad, requires_grad)
                self.assertTrue(tmp._is_view())
                self.assertTrue(tmp.is_leaf)

                # view -> view -> inplace
                self.assertTrue(torch.is_inference_mode_enabled())
                tmp.add_(2)
                self.assertFalse(torch.is_inference(tmp))
                self.assertEqual(tmp.requires_grad, requires_grad)
                # Accessing is_leaf in python tries to update grad_fn and raises:
                # A view was created in inference mode and its base or
                # another view of its base has been modified inplace in normal mode
                # tmp.is_leaf
                self.assertEqual(a._version, tmp._version)

    def test_normal_tensor_view_output_in_normal_mode(self):
        def functional_op(x):
            return x * x

        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                out = a.view(-1)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                self.assertTrue(out._is_view())
                self.assertTrue(out.is_leaf)

            tmp = functional_op(out)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)

            if requires_grad:
                err_msg = "A view was created in inference mode and is being modified inplace"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    out.add_(2)
                pass
            else:
                out.add_(2)

            tmp = out.view(2, 3)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)

    def test_mix_inference_and_normal_tensor_functional_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)

            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

                # add is safe since it doesn't save any variable for backward
                out = c.add(s)
                self.assertFalse(torch.is_inference(out))
                self.assertEqual(out.requires_grad, requires_grad)
                if requires_grad:
                    # leaf inference tensor with requires_grad=True can still have gradient
                    out.backward(torch.ones_like(out))
                    self.assertEqual(c.grad, torch.ones_like(c))

                if requires_grad:
                    err_msg = "Inference tensors cannot be saved for backward"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        c * s

                    # inference tensor in TensorList input
                    inputs = [s, c]
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        torch.stack(inputs)

    def test_mix_inference_and_normal_tensor_inplace_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                c = torch.ones(1, 2, 3)
                self.assertTrue(torch.is_inference(c))
                if requires_grad:
                    err_msg = "Inference tensors cannot be saved for backward"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        a.mul_(c)

                    # inference tensor in TensorList input
                    err_msg = ("out=... arguments don't support automatic differentiation, "
                               "but one of the arguments requires grad")
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        torch.mul(s, s, out=c)
                else:
                    a.mul_(c)
                    err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        torch.mul(s, s, out=c)

    def test_mix_inference_and_normal_tensor_view_op(self):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)

            with torch.inference_mode():
                c = torch.ones(1, 2, 3)

                # view_as is a composite op which calls view with only one
                # tensor argument. So there isn't a mixed inference and normal
                # tensor inputs for view ops
                tmp1 = c.view_as(s)
                self.assertTrue(torch.is_inference(tmp1))
                self.assertFalse(tmp1.requires_grad)

                # this is fine since its equivalent as s.view(c.sizes()) which
                # isn't a mixed input scenario
                tmp2 = s.view_as(c)
                self.assertFalse(torch.is_inference(tmp2))
                self.assertEqual(tmp2.requires_grad, requires_grad)

    def test_inference_mode_handle_direct_view_on_rebase(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    view_out = a.view_as(a)

                if requires_grad:
                    err_msg = "A view was created in inference mode and is being modified inplace"
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(view_out)
                    pass
                else:
                    fn(view_out)
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))

    def test_inference_mode_handle_indirect_view_on_rebase(self):
        def run_test(fn):
            for requires_grad in (True, False):
                s = torch.ones(1, 2, 3, requires_grad=requires_grad)
                a = s.clone()

                with torch.inference_mode():
                    view_out = a.view(-1)

                fn(a)
                if requires_grad:
                    err_msg = "A view was created in inference mode and its base or another view "
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        view_out.grad_fn
                    pass
                else:
                    view_out.grad_fn
        run_test(lambda x: x.add_(2))
        run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
    """Run ``fn(*args, **kwargs)`` concurrently on ``num_threads`` Python
    threads, re-raising in the caller any exception a worker raised."""
    class PropagatingThread(threading.Thread):
        '''Helper class to propagate exception from child
        thread to main thread on join.

        Reference: https://stackoverflow.com/a/31614591/5602957
        '''

        def run(self):
            self.exception = None
            try:
                self.ret = super(PropagatingThread, self).run()
            except Exception as e:
                self.exception = e

        def join(self, timeout=None):
            super(PropagatingThread, self).join(timeout)
            if self.exception:
                raise self.exception from self.exception
            return self.ret

    threads = []
    for _ in range(num_threads):
        # Bug fix: ``kwargs`` was accepted but silently dropped; forward it
        # to the worker threads (None means "no keyword arguments").
        p = PropagatingThread(target=fn, args=args, kwargs=kwargs or {})
        p.start()
        threads.append(p)

    for p in threads:
        p.join()
def test_multithreaded_exception_propagation(self):
    # An assertion failing inside a worker thread must surface in the main
    # thread as a regular exception on join.
    def failing_fn():
        self.assertTrue(False)

    with self.assertRaises(AssertionError):
        self._run_py_multithread_fn(failing_fn)
def test_simple_backward(self):
    # Each thread builds a fully independent graph (its own leaf and ops)
    # and runs backward on it concurrently with the others.
    def worker():
        inp = torch.ones(5, 5, requires_grad=True)
        out = (inp + 3) * (inp + 4) * 0.5
        out.sum().backward()
        self.assertEqual(inp.grad, inp + 3.5)

    self._run_py_multithread_fn(worker)
def test_simple_backward_same_input(self):
    """Hogwild-style: many threads call backward()/grad() on one shared leaf."""
    # simple multithreaded backward with only shared inputs (i.e. This is common
    # for things like Hogwild multithreaded training with multiple CPU threads)
    def train_fn_backward(x):
        y = (x + 3) * (x + 4) * 0.5
        y.sum().backward()

    x = torch.ones(5, 5, requires_grad=True)
    self._run_py_multithread_fn(train_fn_backward, (x,))
    # Since we are calling backward from multiple threads
    # and all threads share the same input, when we do backward
    # concurrently, different backwards will all accumulate to
    # the same .grad for each input, and the gradients should
    # be equal to num_threads * gradient
    self.assertEqual(x.grad, 10 * (x + 3.5))

    def train_fn_grad(x):
        y = (x + 3) * (x + 4) * 0.5
        grads = torch.autograd.grad(y.sum(), x)
        self.assertEqual(len(grads), 1)
        self.assertEqual(grads[0], x + 3.5)

    # since we use functional grad() api, gradients will not
    # be accumulate to the same place and should be the same
    self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
    """Saved-tensor hooks registered inside a thread fire for that thread's graph."""
    def pack(x):
        warnings.warn("pack")
        return x

    def registers_hooks_for_each_thread():
        with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
            x = torch.ones(5, 5, requires_grad=True)
            with warnings.catch_warnings(record=True) as w:
                y = x * x
                # should raise two warnings from x being saved twice
                self.assertEqual(len(w), 2)
            y.sum().backward()

    # Bug fix: the worker closure was defined but never executed, so the
    # test previously asserted nothing. Run it from multiple threads.
    self._run_py_multithread_fn(registers_hooks_for_each_thread)
def test_dataparallel_saved_tensors_hooks(self):
    """Saved-tensor hooks are thread-local, so DataParallel workers must not see them."""
    def pack(x):
        warnings.warn("pack")
        return x

    _self = self

    class Model(torch.nn.Module):
        def forward(self, x):
            with warnings.catch_warnings(record=True) as w:
                y = x * x
                if torch.cuda.device_count() >= 2:
                    # DataParallel is calling the forward in different threads
                    # without propagating TLS, so hooks should not be called here
                    _self.assertEqual(len(w), 0)
                else:
                    # DataParallel only uses one thread
                    # so hooks should be called here
                    _self.assertGreater(len(w), 0)

    x = torch.ones(5, 5, requires_grad=True)
    model = torch.nn.DataParallel(Model())

    with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
        model(x)
        with warnings.catch_warnings(record=True) as w:
            y = x * x
            # hooks should be called here
            _self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
    """A GraphTask shared across threads requires retain_graph=True on reuse."""
    # User might write a network that starts on one CPU thread, then runs its second half
    # concurrently with other threads (either via python threading or fork/join calls),
    # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
    # bottom to output at the top. This way part of the GraphTask is being shared across
    # different threads and we need to ensure user specify retain_graph=True, otherwise
    # error out with the correct error message

    # Case 1: multiple backward with python threads, retain_graph=False
    # should throw error in some threads with no retain_graph.
    success_vs_raises = [0, 0]

    def train_fn_no_retain_graph(x):
        y = x + x ** 2
        try:
            y.sum().backward()
            success_vs_raises[0] += 1
        except RuntimeError as error:
            success_vs_raises[1] += 1
            self.assertRegex(str(error), "Specify retain_graph=True")

    x_no_retain = torch.ones(5, 5, requires_grad=True)
    y_no_retain = x_no_retain + x_no_retain ** 2
    self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
    # at least one thread will be success in this case, all other threads should raise
    # with the error that throw to user to recommend them specify retain_graph=True
    self.assertTrue(success_vs_raises[0] >= 1)

    # multiple backward with python threads, no error with retain_graph=True
    def train_fn_retain_graph(x):
        y = x + x ** 2
        y.sum().backward(retain_graph=True)

    x_retain = torch.ones(5, 5, requires_grad=True)
    y_retain = x_retain + x_retain ** 2
    self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
    # result should equal to num_thread * gradients
    self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
    """Same Y-pattern graph sharing as above, but with TorchScript fork/join threads."""
    # multiple backward with jit threads (fork/join primitive)
    # similar to test_python_thread_in_middle, we test with retain_graph=False/True

    # Case 1: multiple grad() calls with jit threads, retain_graph=False
    # should throw error in some threads with no retain_graph.
    @torch.jit.script
    def train_fn_jit_no_retain(middle, orig_x):
        y = middle + middle ** 2
        return torch.autograd.grad([y.sum()], [orig_x])

    @torch.jit.script
    def train_fn_fork_join_calls_no_retain(x):
        y_no_retain = (x + 3) * (x + 4) * 0.5

        fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
        grad_hat = train_fn_jit_no_retain(y_no_retain, x)
        grad = torch.jit._wait(fut)
        return grad, grad_hat

    try:
        train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
    except RuntimeError as error:
        self.assertRegex(str(error), "Specify retain_graph=True")

    # Case 2: no error with retain_graph=True
    @torch.jit.script
    def train_fn_jit_retain(middle, orig_x):
        y = middle + middle ** 2
        return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)

    @torch.jit.script
    def train_fn_fork_join_calls_retain(x):
        y_retain = (x + 3) * (x + 4) * 0.5
        fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
        fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
        grad = train_fn_jit_retain(y_retain, x)
        grad1 = torch.jit._wait(fut1)
        grad2 = torch.jit._wait(fut2)
        return grad, grad1, grad2

    grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
    self.assertEqual(grad, grad1)
    self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
    """An error raised inside a custom Function's backward keeps its original
    Python traceback when re-raised by the autograd engine."""
    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, *grad):
            raise ValueError("something")

    t = torch.rand(10, requires_grad=True)
    try:
        Foo.apply(t).sum().backward()
    except Exception:
        import traceback
        tb = sys.exc_info()[2]
        tb_str = "\n".join(traceback.format_tb(tb))
        # The raising line from backward() must appear in the traceback.
        self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Import test cases from below autograd/ here. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex  # noqa: F401
from autograd.test_functional import TestAutogradFunctional  # noqa: F401

# Generate per-device variants of the device-generic tests,
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA.
instantiate_device_type_tests(
    TestAutogradDeviceType,
    globals(),
    except_for=None
)

# Expand @parametrize-decorated tests in TestAutograd into concrete methods.
instantiate_parametrized_tests(TestAutograd)

if __name__ == '__main__':
    run_tests()
|
ArmTest1.py | #!/usr/bin/python3
# encoding: utf-8
import asyncio
import threading
import time
import ArmController as controller #舵机转动
import random
import websockets
# Robot arm position calibration.
def Arm_Pos_Corr():
    """Drive servos 1 and 2 to their calibration positions and wait for the
    move to complete."""
    controller.setServo(1, 1200, 500)  # args: servo id, target position, move time (ms)
    controller.setServo(2, 500, 500)
    time.sleep(1)  # allow the 500 ms moves to finish before returning
def get_arm_pos():
    """Poll and print the positions of servo channels 0, 2, 3 and 4 forever,
    at roughly 30 samples per second."""
    frame_delay = 1.0 / 30  # 30 frames per second
    while True:
        positions = [controller.Servos[idx].getPosition()
                     for idx in (0, 2, 3, 4)]
        print(positions)
        time.sleep(frame_delay)
def animate_arm():
    """Endlessly move servos 1, 3, 4 and 5 to random offsets from their
    current positions, with a randomized move duration each cycle."""
    while True:
        duration = 2000 + random.randint(-20, 20) * 40

        def jittered_target(servo_index):
            # Random offset around the servo's current position.
            current = controller.Servos[servo_index].getPosition()
            return current + random.randint(-20, 20) * 20

        controller.setServo(1, jittered_target(0), duration)
        controller.setServo(3, jittered_target(2), duration)
        controller.setServo(4, jittered_target(3), duration)
        controller.setServo(5, jittered_target(4), duration)
        # Sleep slightly longer than the move so it completes.
        time.sleep((duration + 100) / 1000)
if __name__ == "__main__":
print("start")
controller.initLeArm([0,0,0,0,0,0])
time.sleep(1)
Arm_Pos_Corr()
get_pos = threading.Thread(target=get_arm_pos)
animate = threading.Thread(target=animate_arm)
# get_pos.setDaemon(True)
# animate.setDaemon(True)
get_pos.start()
animate.start()
get_pos.join()
animate.join()
print("end")
|
methods.py | # Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of fork support test methods."""
import enum
import json
import logging
import multiprocessing
import os
import threading
import time
import grpc
from six.moves import queue
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
_LOGGER = logging.getLogger(__name__)
_RPC_TIMEOUT_S = 10
_CHILD_FINISH_TIMEOUT_S = 60
def _channel(args):
    """Open a gRPC channel to the test server described by ``args``.

    Uses TLS when ``args['use_tls']`` is truthy, plaintext otherwise.
    """
    target = '{}:{}'.format(args['server_host'], args['server_port'])
    if not args['use_tls']:
        return grpc.insecure_channel(target)
    credentials = grpc.ssl_channel_credentials()
    return grpc.secure_channel(target, credentials)
def _validate_payload_type_and_length(response, expected_type, expected_length):
if response.payload.type is not expected_type:
raise ValueError('expected payload type %s, got %s' %
(expected_type, type(response.payload.type)))
elif len(response.payload.body) != expected_length:
raise ValueError('expected payload body size %d, got %d' %
(expected_length, len(response.payload.body)))
def _async_unary(stub):
    """Issue a UnaryCall through the future (async) API and validate the
    reply's payload type and size."""
    response_size = 314159
    request = messages_pb2.SimpleRequest(
        response_type=messages_pb2.COMPRESSABLE,
        response_size=response_size,
        payload=messages_pb2.Payload(body=b'\x00' * 271828))
    future = stub.UnaryCall.future(request, timeout=_RPC_TIMEOUT_S)
    _validate_payload_type_and_length(
        future.result(), messages_pb2.COMPRESSABLE, response_size)
def _blocking_unary(stub):
    """Issue a blocking UnaryCall and validate the reply's payload type and
    size."""
    response_size = 314159
    request = messages_pb2.SimpleRequest(
        response_type=messages_pb2.COMPRESSABLE,
        response_size=response_size,
        payload=messages_pb2.Payload(body=b'\x00' * 271828))
    reply = stub.UnaryCall(request, timeout=_RPC_TIMEOUT_S)
    _validate_payload_type_and_length(
        reply, messages_pb2.COMPRESSABLE, response_size)
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class _ChildProcess(object):
    """Runs ``task(*args)`` in a child process and surfaces its failures.

    Exceptions raised by the task are shipped back to the parent through a
    multiprocessing queue and re-raised as ValueError by finish().
    """

    def __init__(self, task, args=None):
        if args is None:
            args = ()
        # Queue through which the child reports exceptions to the parent.
        self._exceptions = multiprocessing.Queue()

        def record_exceptions():
            try:
                task(*args)
            except grpc.RpcError as rpc_error:
                # NOTE(review): RpcError is converted to a string before
                # queueing — presumably because it does not transfer across
                # processes cleanly; confirm before changing.
                self._exceptions.put('RpcError: %s' % rpc_error)
            except Exception as e:  # pylint: disable=broad-except
                self._exceptions.put(e)

        self._process = multiprocessing.Process(target=record_exceptions)

    def start(self):
        self._process.start()

    def finish(self):
        """Join the child and raise if it hung, exited non-zero, or reported
        an exception.

        Raises:
          RuntimeError: the child did not terminate within the timeout.
          ValueError: the child exited non-zero or queued an exception.
        """
        self._process.join(timeout=_CHILD_FINISH_TIMEOUT_S)
        if self._process.is_alive():
            raise RuntimeError('Child process did not terminate')
        if self._process.exitcode != 0:
            raise ValueError('Child process failed with exitcode %d' %
                             self._process.exitcode)
        try:
            # Non-blocking: an empty queue means the child saw no exception.
            exception = self._exceptions.get(block=False)
            raise ValueError('Child process failed: "%s": "%s"' %
                             (repr(exception), exception))
        except queue.Empty:
            pass
def _async_unary_same_channel(channel):
    """Test: a forked child must not be able to reuse the parent's channel
    for an async unary RPC, while the parent keeps working."""

    def child_target():
        try:
            _async_unary(stub)
            raise Exception(
                'Child should not be able to re-use channel after fork')
        except ValueError as expected_value_error:
            pass

    stub = test_pb2_grpc.TestServiceStub(channel)
    _async_unary(stub)  # exercise the channel before forking
    child_process = _ChildProcess(child_target)
    child_process.start()
    _async_unary(stub)  # the parent can still use the channel after fork
    child_process.finish()
def _async_unary_new_channel(channel, args):
    """Test: a forked child can create a fresh channel and complete an async
    unary RPC, while the parent keeps using its original channel."""

    def child_target():
        with _channel(args) as child_channel:
            child_stub = test_pb2_grpc.TestServiceStub(child_channel)
            _async_unary(child_stub)
            # Fix: removed the explicit child_channel.close() here — the
            # context manager already closes the channel on exit, and the
            # sibling _blocking_unary_new_channel never had it.

    stub = test_pb2_grpc.TestServiceStub(channel)
    _async_unary(stub)
    child_process = _ChildProcess(child_target)
    child_process.start()
    _async_unary(stub)
    child_process.finish()
def _blocking_unary_same_channel(channel):
    """Test: a forked child must not be able to reuse the parent's channel
    for a blocking unary RPC."""

    def child_target():
        try:
            _blocking_unary(stub)
            raise Exception(
                'Child should not be able to re-use channel after fork')
        except ValueError as expected_value_error:
            pass

    stub = test_pb2_grpc.TestServiceStub(channel)
    _blocking_unary(stub)  # exercise the channel before forking
    child_process = _ChildProcess(child_target)
    child_process.start()
    child_process.finish()
def _blocking_unary_new_channel(channel, args):
    """Test: a forked child opens its own channel and performs a blocking
    unary RPC while the parent continues using the original channel."""

    def child_target():
        with _channel(args) as child_channel:
            _blocking_unary(test_pb2_grpc.TestServiceStub(child_channel))

    parent_stub = test_pb2_grpc.TestServiceStub(channel)
    _blocking_unary(parent_stub)
    child = _ChildProcess(child_target)
    child.start()
    _blocking_unary(parent_stub)
    child.finish()
# Verify that the fork channel registry can handle already closed channels
def _close_channel_before_fork(channel, args):
    """Close one channel before forking, then confirm parent and child can
    both still RPC over fresh channels."""

    def child_target():
        # Close the inherited channel inside the child, then use a new one.
        new_channel.close()
        with _channel(args) as child_channel:
            child_stub = test_pb2_grpc.TestServiceStub(child_channel)
            _blocking_unary(child_stub)

    stub = test_pb2_grpc.TestServiceStub(channel)
    _blocking_unary(stub)
    channel.close()  # the first channel is closed before the fork happens
    with _channel(args) as new_channel:
        new_stub = test_pb2_grpc.TestServiceStub(new_channel)
        child_process = _ChildProcess(child_target)
        child_process.start()
        _blocking_unary(new_stub)
        child_process.finish()
def _connectivity_watch(channel, args):
    """Subscribe to connectivity on the parent channel, fork, and verify the
    child's own subscription works while the parent callback does not
    accumulate updates caused by the child."""
    parent_states = []
    parent_channel_ready_event = threading.Event()

    def child_target():
        child_channel_ready_event = threading.Event()

        def child_connectivity_callback(state):
            if state is grpc.ChannelConnectivity.READY:
                child_channel_ready_event.set()

        with _channel(args) as child_channel:
            child_stub = test_pb2_grpc.TestServiceStub(child_channel)
            child_channel.subscribe(child_connectivity_callback)
            _async_unary(child_stub)
            if not child_channel_ready_event.wait(timeout=_RPC_TIMEOUT_S):
                raise ValueError('Channel did not move to READY')
            # The parent's callback list was inherited by the fork; it must
            # not have collected extra updates inside the child.
            if len(parent_states) > 1:
                raise ValueError(
                    'Received connectivity updates on parent callback',
                    parent_states)
            child_channel.unsubscribe(child_connectivity_callback)

    def parent_connectivity_callback(state):
        parent_states.append(state)
        if state is grpc.ChannelConnectivity.READY:
            parent_channel_ready_event.set()

    channel.subscribe(parent_connectivity_callback)
    stub = test_pb2_grpc.TestServiceStub(channel)
    child_process = _ChildProcess(child_target)
    child_process.start()
    _async_unary(stub)
    if not parent_channel_ready_event.wait(timeout=_RPC_TIMEOUT_S):
        raise ValueError('Channel did not move to READY')
    channel.unsubscribe(parent_connectivity_callback)
    child_process.finish()
def _ping_pong_with_child_processes_after_first_response(
        channel, args, child_target, run_after_close=True):
    """Drive a FullDuplexCall ping-pong, forking children at several points.

    Children running ``child_target(parent_bidi_call, channel, args)`` are
    forked (a) while a response is in flight (from the second exchange on),
    (b) right after each response arrives, and optionally (c) once more after
    the request pipe is closed.

    Args:
      channel: the parent channel carrying the bidi call.
      args: connection arguments forwarded to each child.
      child_target: callable executed inside every forked child.
      run_after_close: whether to fork one more child after pipe.close().
    """
    request_response_sizes = (
        31415,
        9,
        2653,
        58979,
    )
    request_payload_sizes = (
        27182,
        8,
        1828,
        45904,
    )
    stub = test_pb2_grpc.TestServiceStub(channel)
    pipe = _Pipe()
    parent_bidi_call = stub.FullDuplexCall(pipe)
    child_processes = []
    first_message_received = False
    for response_size, payload_size in zip(request_response_sizes,
                                           request_payload_sizes):
        request = messages_pb2.StreamingOutputCallRequest(
            response_type=messages_pb2.COMPRESSABLE,
            response_parameters=(messages_pb2.ResponseParameters(
                size=response_size),),
            payload=messages_pb2.Payload(body=b'\x00' * payload_size))
        pipe.add(request)
        if first_message_received:
            # Fork while the response to this request is still in flight.
            child_process = _ChildProcess(child_target,
                                          (parent_bidi_call, channel, args))
            child_process.start()
            child_processes.append(child_process)
        response = next(parent_bidi_call)
        first_message_received = True
        # Fork again immediately after receiving this exchange's response.
        child_process = _ChildProcess(child_target,
                                      (parent_bidi_call, channel, args))
        child_process.start()
        child_processes.append(child_process)
        _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE,
                                          response_size)
    pipe.close()
    if run_after_close:
        child_process = _ChildProcess(child_target,
                                      (parent_bidi_call, channel, args))
        child_process.start()
        child_processes.append(child_process)
    for child_process in child_processes:
        child_process.finish()
def _in_progress_bidi_continue_call(channel):
    """Children forked mid-bidi must see the inherited call CANCELLED with
    the fork-specific details, and must not reuse the parent channel."""

    def child_target(parent_bidi_call, parent_channel, args):
        stub = test_pb2_grpc.TestServiceStub(parent_channel)
        try:
            _async_unary(stub)
            raise Exception(
                'Child should not be able to re-use channel after fork')
        except ValueError as expected_value_error:
            pass
        # The inherited in-progress call must have been cancelled by the
        # fork handlers with the expected status and details.
        inherited_code = parent_bidi_call.code()
        inherited_details = parent_bidi_call.details()
        if inherited_code != grpc.StatusCode.CANCELLED:
            raise ValueError('Expected inherited code CANCELLED, got %s' %
                             inherited_code)
        if inherited_details != 'Channel closed due to fork':
            raise ValueError(
                'Expected inherited details Channel closed due to fork, got %s'
                % inherited_details)

    # Don't run child_target after closing the parent call, as the call may have
    # received a status from the server before fork occurs.
    _ping_pong_with_child_processes_after_first_response(channel,
                                                        None,
                                                        child_target,
                                                        run_after_close=False)
def _in_progress_bidi_same_channel_async_call(channel):
    """While a bidi call is in flight, a forked child must fail to reuse the
    parent channel for an async unary RPC."""

    def child_target(parent_bidi_call, parent_channel, args):
        child_stub = test_pb2_grpc.TestServiceStub(parent_channel)
        try:
            _async_unary(child_stub)
        except ValueError as expected_value_error:
            return
        raise Exception(
            'Child should not be able to re-use channel after fork')

    _ping_pong_with_child_processes_after_first_response(
        channel, None, child_target)
def _in_progress_bidi_same_channel_blocking_call(channel):
    """While a bidi call is in flight, a forked child must fail to reuse the
    parent channel for a blocking unary RPC."""

    def child_target(parent_bidi_call, parent_channel, args):
        child_stub = test_pb2_grpc.TestServiceStub(parent_channel)
        try:
            _blocking_unary(child_stub)
        except ValueError as expected_value_error:
            return
        raise Exception(
            'Child should not be able to re-use channel after fork')

    _ping_pong_with_child_processes_after_first_response(
        channel, None, child_target)
def _in_progress_bidi_new_channel_async_call(channel, args):
    """While a bidi call is in flight, a forked child can open its own
    channel and complete an async unary RPC."""

    def child_target(parent_bidi_call, parent_channel, args):
        with _channel(args) as fresh_channel:
            _async_unary(test_pb2_grpc.TestServiceStub(fresh_channel))

    _ping_pong_with_child_processes_after_first_response(
        channel, args, child_target)
def _in_progress_bidi_new_channel_blocking_call(channel, args):
    """While a bidi call is in flight, a forked child can open its own
    channel and complete a blocking unary RPC."""

    def child_target(parent_bidi_call, parent_channel, args):
        with _channel(args) as fresh_channel:
            _blocking_unary(test_pb2_grpc.TestServiceStub(fresh_channel))

    _ping_pong_with_child_processes_after_first_response(
        channel, args, child_target)
@enum.unique
class TestCase(enum.Enum):
    """Enumeration of fork-support test cases; run one via run_test(args)."""

    CONNECTIVITY_WATCH = 'connectivity_watch'
    CLOSE_CHANNEL_BEFORE_FORK = 'close_channel_before_fork'
    ASYNC_UNARY_SAME_CHANNEL = 'async_unary_same_channel'
    ASYNC_UNARY_NEW_CHANNEL = 'async_unary_new_channel'
    BLOCKING_UNARY_SAME_CHANNEL = 'blocking_unary_same_channel'
    BLOCKING_UNARY_NEW_CHANNEL = 'blocking_unary_new_channel'
    IN_PROGRESS_BIDI_CONTINUE_CALL = 'in_progress_bidi_continue_call'
    IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL = 'in_progress_bidi_same_channel_async_call'
    IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL = 'in_progress_bidi_same_channel_blocking_call'
    IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL = 'in_progress_bidi_new_channel_async_call'
    IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL = 'in_progress_bidi_new_channel_blocking_call'

    def run_test(self, args):
        """Open a channel, dispatch to this case's implementation, and always
        close the channel.

        Fix: the close is now in a finally block — previously the channel
        leaked whenever the test implementation raised.

        Raises:
          NotImplementedError: no implementation exists for this member.
        """
        _LOGGER.info("Running %s", self)
        channel = _channel(args)
        try:
            if self is TestCase.ASYNC_UNARY_SAME_CHANNEL:
                _async_unary_same_channel(channel)
            elif self is TestCase.ASYNC_UNARY_NEW_CHANNEL:
                _async_unary_new_channel(channel, args)
            elif self is TestCase.BLOCKING_UNARY_SAME_CHANNEL:
                _blocking_unary_same_channel(channel)
            elif self is TestCase.BLOCKING_UNARY_NEW_CHANNEL:
                _blocking_unary_new_channel(channel, args)
            elif self is TestCase.CLOSE_CHANNEL_BEFORE_FORK:
                _close_channel_before_fork(channel, args)
            elif self is TestCase.CONNECTIVITY_WATCH:
                _connectivity_watch(channel, args)
            elif self is TestCase.IN_PROGRESS_BIDI_CONTINUE_CALL:
                _in_progress_bidi_continue_call(channel)
            elif self is TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL:
                _in_progress_bidi_same_channel_async_call(channel)
            elif self is TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL:
                _in_progress_bidi_same_channel_blocking_call(channel)
            elif self is TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL:
                _in_progress_bidi_new_channel_async_call(channel, args)
            elif self is TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL:
                _in_progress_bidi_new_channel_blocking_call(channel, args)
            else:
                raise NotImplementedError('Test case "%s" not implemented!' %
                                          self.name)
        finally:
            channel.close()
|
mail.py | import time
from datetime import datetime, timedelta
import configparser
from time import sleep
import sqlite3
from loguru import logger
import threading
import smtplib
from email.mime.text import MIMEText
from gmail import Gmail
from threading import Thread
class Notifier(Gmail, Thread):
    """Background e-mail notifier.

    Watches a sqlite temperature database and mails the configured recipients
    when readings look bad, rate-limited by the configured hours/minutes
    between mails. Settings are read from and persisted to an INI file
    (section ``[mail]``; the database name comes from
    ``[flask] sqlite_db_name``).

    Fixes applied in this revision:
      * ``stop()`` was defined twice; the second definition silently replaced
        the first — the two are merged below.
      * ``run()`` evaluated ``looks_good()`` once before its loop, so the
        temperature check never refreshed; it is now re-evaluated each pass.
      * ``add_recipients`` passed a *list* to configparser when only one
        recipient remained, raising TypeError; it now always joins.
      * sqlite connections and the config file handle are now closed.
    """

    def __init__(self, config_file):
        config = configparser.ConfigParser()
        config.read(config_file)
        # '0'/'1' string straight from the config file.
        self.enabled = config['mail']['enabled']
        self.running = False
        self.setup(config_file)

    def setup(self, config_file):
        """Load settings, initialise the Gmail and Thread bases, and start
        the worker thread when enabled in the config."""
        self.running_switch = True
        self.config_file = config_file
        config = configparser.ConfigParser()
        config.read(config_file)
        gmail_user = config['mail']['gmail_user']
        gmail_password = config['mail']['gmail_password']
        self.hours_between_every_mail = int(config['mail']['hours_between_every_mail'])
        self.minutes_between_every_mail = int(config['mail']['minutes_between_every_mail'])
        # Back-date the marker so the first warning may be sent immediately.
        self.last_mail = datetime.now() - timedelta(
            hours=self.hours_between_every_mail,
            minutes=self.minutes_between_every_mail)
        self.DATABASE = config['flask']['sqlite_db_name']
        Gmail.__init__(self, gmail_user, gmail_password)
        self.start_stop = True
        Thread.__init__(self)
        if int(self.enabled):
            self.start()

    def update_key_values(self, file, section, key, value):
        """Persist ``section/key = value`` back into the INI file ``file``."""
        config = configparser.ConfigParser()
        config.read(file)
        config.set(section, key, value)
        # Fix: context manager closes the handle even if write() raises.
        with open(file, 'w') as cfgfile:
            config.write(cfgfile)

    def add_recipients(self, new_recipients: list):
        """Merge ``new_recipients`` with the stored list (deduplicated,
        order-preserving) and persist it."""
        old_recipients = self.get_recipients()
        new_recipients.extend(old_recipients)
        new_recipients = list(dict.fromkeys(new_recipients))  # remove duplicates
        # Fix: always join into a string — a single recipient used to be
        # handed to configparser as a list, which raises TypeError.
        self.update_key_values(self.config_file, 'mail', 'recipients',
                               ', '.join(new_recipients))

    def remove_recipients(self, recipients: list):
        """Remove each given recipient and persist the remaining list.

        Raises ValueError if a recipient is not currently stored (behaviour
        unchanged from the original).
        """
        old_recipients = self.get_recipients()
        for recipient in recipients:
            old_recipients.remove(recipient)
        self.update_key_values(self.config_file, 'mail', 'recipients',
                               ', '.join(old_recipients))

    def set_min_temp(self, temp):
        """Persist the minimum-temperature threshold."""
        self.update_key_values(self.config_file, 'mail', 'min_temp', temp)

    def get_min_temp(self):
        """Return the minimum-temperature threshold as an int."""
        config = configparser.ConfigParser()
        config.read(self.config_file)
        return int(config['mail']['min_temp'])

    def update_last_sent_mail(self):
        """Record 'now' as the time of the most recent mail."""
        self.update_key_values(self.config_file, "mail", "last_sent_mail",
                               self.timenow())

    def get_last_sent_mail(self):
        """Timestamp of the last sent mail; far in the past when unset, so
        the rate limiter allows sending."""
        config = configparser.ConfigParser()
        config.read(self.config_file)
        last = config['mail']['last_sent_mail']
        if last == '':
            return datetime.now() - timedelta(days=100)
        return self.convert_back_to_date_time(last)

    def timenow(self):
        """Current local time formatted as '%Y-%m-%d %H:%M:%S'."""
        return datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')

    def get_recipients(self):
        """Recipient addresses as a list (stored comma-separated)."""
        config = configparser.ConfigParser()
        config.read(self.config_file)
        recipients = config['mail']['recipients'].replace(" ", "")
        return recipients.split(",")

    def convert_back_to_date_time(self, strftime):
        """Parse a '%Y-%m-%d %H:%M:%S' string back into a datetime."""
        return datetime.strptime(strftime, "%Y-%m-%d %H:%M:%S")

    def get_temps(self, start_date, end_date):
        """Return rows from ``temps`` with start_date < date < end_date."""
        conn = sqlite3.connect(self.DATABASE)
        try:
            cur = conn.cursor()
            QUERY = """
            SELECT * FROM temps WHERE date > ? AND date < ?
            """
            return list(cur.execute(QUERY, [start_date, end_date]))
        finally:
            conn.close()  # fix: the connection used to leak

    def get_interval(self):
        """Polling interval (seconds) between temperature checks."""
        config = configparser.ConfigParser()
        config.read(self.config_file)
        return int(config['mail']['interval'])

    def looks_good(self):
        """True when, over the last minute, readings at or below the
        configured minimum threshold outnumber those above it."""
        conn = sqlite3.connect(self.DATABASE)
        try:
            cur = conn.cursor()
            QUERY = """
            SELECT * from temps where date >= ?
            """
            data = list(cur.execute(QUERY, [
                datetime.now() - timedelta(minutes=1)  # last minute only
            ]))
        finally:
            conn.close()  # fix: the connection used to leak
        temps = [float(i[0]) for i in data]
        good_temps = [i for i in temps if i <= self.get_min_temp()]  # e.g. -70, -80
        return len(good_temps) > (len(temps) - len(good_temps))  # more good than bad

    def current_temperature(self):
        """Most recent temperature (last minute) formatted to two decimals,
        or 'not found' when no readings exist."""
        last_temps = self.get_temps(datetime.now() - timedelta(minutes=1),
                                    datetime.now())
        if len(last_temps):
            return "{:.2f}".format(last_temps[0][0])
        return "not found"

    def starting_message(self):
        """Send a bare hello mail to all recipients."""
        self.send_mail(self.get_recipients(), "test", "hello")

    def test_message(self):
        """Send a test mail and record the send time; errors are printed."""
        recipients = self.get_recipients()
        try:
            self.send_mail(recipients, "RTMS test message", f"""
            Test is working
            """)
            self.update_last_sent_mail()
        except Exception as e:
            print(e)

    def warning_message(self):
        """Send a temperature-warning mail and record the send time; errors
        are printed."""
        recipients = self.get_recipients()
        try:
            self.send_mail(recipients, "Monitor warning", f"""
            Incorrect temperature detected.
            Current temperature: {self.current_temperature()}
            """)
            self.update_last_sent_mail()
        except Exception as e:
            print(e)

    def enable_notifier(self):
        """Start the notifier thread and persist enabled=1.

        NOTE(review): threading.Thread objects cannot be start()ed twice;
        re-enabling after a stop will raise RuntimeError — confirm intended.
        """
        self.start()
        self.enabled = True
        self.update_key_values(self.config_file, "mail", "enabled", "1")

    def disable_notifier(self):
        """Stop the notifier loop and persist enabled=0."""
        self.stop()
        self.enabled = False
        self.update_key_values(self.config_file, "mail", "enabled", "0")

    def allowed_to_send_mail(self):
        """Rate limiter: True when the configured interval has elapsed since
        the last recorded mail."""
        time_now = datetime.now()
        return self.get_last_sent_mail() <= (time_now - timedelta(
            hours=self.hours_between_every_mail,
            minutes=self.minutes_between_every_mail))

    def run(self):
        """Worker loop: periodically check temperatures and send a warning
        when they look bad and the rate limiter allows it."""
        logger.info("Notifier started...")
        while self.running_switch:
            if self.start_stop:
                # Fix: re-evaluate on every pass — looks_good() was
                # previously computed once before the loop, so the check
                # never reflected fresh readings.
                if not self.looks_good() and self.allowed_to_send_mail():
                    self.warning_message()
                sleep(self.get_interval())
            else:
                print("Shutting down notifier")
                break

    def stop(self):
        """Stop the worker loop.

        Fix: merges the two previous stop() definitions — the second one
        (which only cleared running_switch) silently shadowed the first.
        """
        print("stop notifier")
        self.running = False
        self.start_stop = False
        self.running_switch = False
if __name__ == '__main__':
Notifier("config.ini") |
RunTesterServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from RunTester.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the deployment config file from the KB_DEPLOYMENT_CONFIG
    environment variable, or None when unset."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the KB_SERVICE_NAME environment variable, or None
    when unset."""
    return environ.get(SERVICE)
def get_config():
    """Parse this service's section of the deployment config into a dict.

    Returns None when no config file is configured in the environment.
    """
    config_file = get_config_file()
    if not config_file:
        return None
    parser = ConfigParser()
    parser.read(config_file)
    section = get_service_name() or 'RunTester'
    return {name: value for name, value in parser.items(section)}
# Load the deployment configuration once at import time so both the service
# implementation and the WSGI application share it.
config = get_config()

from RunTester.RunTesterImpl import RunTester  # noqa @IgnorePep8
# Single implementation instance used for every RPC dispatched by the server.
impl_RunTester = RunTester(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that serializes sets and frozensets as lists and defers
    to an object's toJSONable() method when it provides one."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        # Fall back to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that threads a per-call context (``ctx``)
    through every method invocation and serializes results with
    JSONObjectEncoder.

    NOTE(review): relies on jsonrpcbase internals (method_data, _man_args,
    _fill_request, _get_default_vals, _fill_ver, ...) — verify against the
    pinned jsonrpcbase version before changing.
    """

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the "- 1" accounts for the implicit ctx argument)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # Exception was raised inside the method: wrap it in a JSON-RPC
            # server error carrying the formatted traceback for the client.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code,
        # no need to do it again
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: validate/fill every request first, then run them.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response (None for
        notifications, i.e. requests without an id)."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context (dict subclass) holding caller identity, call ids and
    provenance, plus logging helpers bound to the server logger."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug; numeric 1-3 are mapped
        # to 7-9 instead.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message; ``level`` is 1-3 (mapped to 7-9) or one of
        the symbolic debug levels accepted as-is."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Tag every record with the call-context fields.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance: fetched from the callback server when
        SDK_CALLBACK_URL is set, otherwise the locally stored value.

        Raises:
          ServerError: the callback server reported or implied an error.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # Surface a structured JSON-RPC error when one is present.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    """Raised when a JSON-RPC call returns an error.

    Attributes:
      name: the name of the error.
      code: the numeric error code.
      message: human readable message ('' when absent).
      data: the server-side stacktrace (JSON-RPC 2.0 'data' or 1.1 'error').
    """

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message or ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, str(self.code), self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP: X-Forwarded-For (first hop), then X-Real-IP —
    only when the X headers are trusted per config — falling back to
    REMOTE_ADDR."""
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
    """Keep the server log writing to the same file as the user log after
    the user log's destination changes."""
    self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
    """Write one server-log record tagged with the call-context fields."""
    self.serverlog.log_message(level, message, context['client_ip'],
                               context['user_id'], context['module'],
                               context['method'], context['call_id'])
def __init__(self):
    """Build the WSGI application: loggers, the JSON-RPC service with its
    registered methods, per-method auth requirements, and the auth client."""
    submod = get_service_name() or 'RunTester'
    # Two loggers: the user log follows user configuration; the server log
    # mirrors the user log's file via the change callback.
    self.userlog = log.log(
        submod, ip_address=True, authuser=True, module=True, method=True,
        call_id=True, changecallback=self.logcallback,
        config=get_config_file())
    self.serverlog = log.log(
        submod, ip_address=True, authuser=True, module=True, method=True,
        call_id=True, logfile=self.userlog.get_log_file())
    self.serverlog.set_log_level(6)
    self.rpc_service = JSONRPCServiceCustom()
    # Per-method auth requirement: 'required', 'optional', or absent
    # (treated as 'none' at dispatch time).
    self.method_authentication = dict()
    self.rpc_service.add(impl_RunTester.run_RunTester,
                         name='RunTester.run_RunTester',
                         types=[dict])
    self.method_authentication['RunTester.run_RunTester'] = 'required'  # noqa
    self.rpc_service.add(impl_RunTester.status,
                         name='RunTester.status',
                         types=[dict])
    authurl = config.get(AUTH) if config else None
    self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'RunTester ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level singleton WSGI application instance.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle to the child process created by start_server(newprocess=True);
# None while no background server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    """Start the WSGI server.

    With newprocess=False (the default) this blocks in serve_forever()
    on the calling thread until interrupted. With newprocess=True the
    server runs in a daemonised child process, stop_server() becomes
    usable, and the system-assigned port number is returned.
    """
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        httpd.serve_forever()
    else:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Terminate a server started with start_server(newprocess=True).

    Raises RuntimeError if no background server is running (previously
    this crashed with an unhelpful AttributeError on None), mirroring
    the RuntimeError start_server raises when one is already running.
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute one JSON-RPC call read from a file and write the response.

    Used for asynchronous / CLI execution: the request JSON is loaded
    from input_file_path, run against the module implementation, and the
    JSON-RPC response is written to output_file_path. Returns 0 on
    success, 500 if the response contains an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in JSON-RPC 1.1 defaults the caller may have omitted.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        # Validate the token and mark the context authenticated.
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        # Known RPC failure: report code/name/message plus trace if any.
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        # Anything else becomes a generic server error with the traceback.
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async/CLI mode: argv = <input_json_file> <output_file> [token|token_file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument is either a file containing the token or the
            # token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise run the stand-alone HTTP server; options: --port / --host.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
#    print("Listening on port %s" % port)
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
web_serial.py | # -*- coding: utf-8 -*-
from websocket_server import WebsocketServer
import json
import serial
import os
from time import sleep
import threading
class WebSerial(object):
    """Bridge a WebSocket server to a local serial port.

    Singleton: browser clients connect over WebSocket, send JSON control
    messages (open / close / send), and receive serial-port output back
    as text messages.
    """
    _instance_lock = threading.Lock()

    def __init__(self, port, ipaddr):
        # WebSocket listen endpoint.
        self.ipport = port
        self.ipaddr = ipaddr
        self.web_server = ""            # websocket server object
        self.web_serial = ""            # serial port object
        self.web_serial_isopen = False
        # Default serial parameters (device/baud are taken from the
        # client's "uart_on" message when a port is opened).
        self.port = "ttyO1"
        self.baudrate = 115200
        self.bytesize = 8
        self.parity = serial.PARITY_NONE
        self.stopbits = 1
        self.timeout = 1
        self.xonxoff = False            # software flow control
        self.rtscts = False             # hardware flow control
        self.uart_auto_send = False     # automatic-send flag
        self.uart_set_send_time = 0     # auto-send interval (milliseconds)
        self.uart_recv_thread = ""
        self.uart_mutex = threading.Lock()
        self.uart_auto_send_thread = ""
        self.uart_send_is_auto = False
        self.uart_data = ""             # payload for automatic sending

    # Serial worker-thread functions below.
    def __new__(cls, *args, **kwargs):
        # Double-checked-locking style singleton: only one WebSerial
        # instance is ever created.
        if not hasattr(WebSerial, "_instance"):
            with WebSerial._instance_lock:
                if not hasattr(WebSerial, "_instance"):
                    WebSerial._instance = object.__new__(cls)
        return WebSerial._instance

    def serial_read_thread(self, client):
        """Forward bytes read from the serial port to *client* until the
        port is closed or a read/send error occurs."""
        while True:
            #print("---pthread----")
            if self.web_serial_isopen:
                try:
                    data = self.web_serial.read(1)
                    if data:
                        # Drain whatever else is already buffered.
                        n = self.web_serial.inWaiting()
                        if n:
                            data = data + self.web_serial.read(n)
                        self.web_server.send_message(client, data.decode("utf-8"))
                        #print(data.decode("utf-8"))
                        data = ''
                except:
                    break
            else:
                break

    def serial_auto_send_thread(self):
        """Periodically write uart_data to the port every
        uart_set_send_time milliseconds while auto-send stays enabled."""
        while True:
            if self.web_serial_isopen:
                if self.uart_send_is_auto:
                    try:
                        sleep(self.uart_set_send_time / 1000)
                        self.web_serial.write(self.uart_data.encode("utf-8"))
                    except:
                        break
                else:
                    # Auto-send was toggled off: re-arm the flag and exit.
                    self.uart_send_is_auto = True
                    break
            else:
                break

    # Serial-port control functions below.
    def serial_set(self, client, message):
        """Handle one JSON control message from a client.

        Supported "type" values: "uart_on" (open the port and start the
        reader thread), "uart_off" (close), "uart_send" (one-shot write,
        or start the periodic auto-send thread).
        """
        serial_data_set = json.loads(message)
        #print(serial_data_set)
        # Open the serial port.
        if serial_data_set["type"] == "uart_on":
            if self.web_serial_isopen:
                self.web_server.send_message(client, self.port + " opened!!!\n")
            else:
                try:
                    self.web_serial = serial.Serial("/dev/" + serial_data_set["uart_number"], serial_data_set["uart_baudrate"], timeout=self.timeout)
                except:
                    self.web_server.send_message(client, serial_data_set["uart_number"] + " open failed")
                    return
                print(serial_data_set["uart_number"])
                if self.web_serial.isOpen():
                    print("open success")
                    self.web_serial_isopen = True
                    self.uart_recv_thread = threading.Thread(target=self.serial_read_thread, args=(client,))
                    #sleep(0.001)
                    self.uart_recv_thread.start()
                    self.web_server.send_message(client, serial_data_set["uart_number"] + " open success")
                else:
                    print("open failed")
                    self.uart_send_is_auto = False
                    self.web_serial_isopen = False
                    self.web_server.send_message(client, serial_data_set["uart_number"] + " open failed")
        # Close the serial port.
        elif serial_data_set["type"] == "uart_off":
            #print("uart_off")
            if self.web_serial_isopen:
                self.uart_send_is_auto = False
                self.web_serial_isopen = False
                sleep(0.01)
                self.web_serial.close()
                # NOTE(review): "cosled" looks like a typo for "closed", but
                # it is a runtime message clients may match on — left as-is.
                self.web_server.send_message(client, self.port + " cosled!!!\n")
        # Send data over the serial port.
        elif serial_data_set["type"] == "uart_send":
            if self.web_serial_isopen:
                if serial_data_set["uart_how_to_send"] == "man":
                    # Manual one-shot send; stop any auto-send first.
                    self.uart_send_is_auto = False
                    sleep(0.001)
                    self.web_serial.write(serial_data_set["uart_data_send"].encode("utf-8"))
                else:
                    # Automatic periodic send.
                    self.uart_set_send_time = serial_data_set["uart_set_time"]
                    self.uart_data = serial_data_set["uart_data_send"]
                    if self.uart_send_is_auto:
                        pass
                    else:
                        self.uart_send_is_auto = True
                        self.uart_auto_send_thread = threading.Thread(target=self.serial_auto_send_thread)
                        self.uart_auto_send_thread.start()
            else:
                self.web_server.send_message(client, self.port + " cosled!!!\n")
        else:
            # Unknown message type: only complain if the port is closed.
            if self.web_serial_isopen:
                pass
            else:
                self.web_server.send_message(client, self.port + " cosled!!!\n")

    # WebSocket callback functions below.
    def new_client(self, client, server):
        """Called by WebsocketServer when a client connects."""
        print("New client connected and was given id %d" % client['id'])
        server.send_message(client, "connected!!!\n")

    def client_left(self, client, server):
        """Called on disconnect: shut the serial port down if it is open."""
        print("Client(%d) disconnected" % client['id'])
        if self.web_serial_isopen:
            self.uart_send_is_auto = False
            self.web_serial_isopen = False
            sleep(0.001)
            self.web_serial.close()
            self.web_server.send_message(client, self.port + " cosled!!!\n")

    def message_received(self, client, server, message):
        """Dispatch every incoming WebSocket message to serial_set()."""
        self.serial_set(client, message)

    def websocket_server(self):
        """Create the WebsocketServer, wire up callbacks and serve forever."""
        self.web_server = WebsocketServer(self.ipport, self.ipaddr)
        self.web_server.set_fn_new_client(self.new_client)
        self.web_server.set_fn_client_left(self.client_left)
        self.web_server.set_fn_message_received(self.message_received)
        self.web_server.run_forever()
# Default WebSocket listen port and bind address (all interfaces).
PORT=9001
IPADDR="0.0.0.0"

if __name__ == '__main__':
    myWebSerial = WebSerial(PORT,IPADDR)
    myWebSerial.websocket_server()
|
segmentProcess.py | #import logging
#logging.basicConfig(level=logging.DEBUG)
import multiprocessing as mp
import signal
import cProfile
import time
import numpy as np
import cv2
import sys
import pyrealsense2 as rs
import constants
import traceback
import json
import bodyparts as bp
import os
import psutil
def scale(inMat):
    """Linearly map depth values in [MIN_DISTANCE, MAX_DISTANCE] onto
    uint8 [0, 255], clipping values outside that range first."""
    span = constants.MAX_DISTANCE - constants.MIN_DISTANCE
    gain = 255.0 / span
    offset = -gain * constants.MIN_DISTANCE
    clipped = np.clip(inMat, constants.MIN_DISTANCE, constants.MAX_DISTANCE)
    return (clipped * gain + offset).astype(np.uint8)
def unscale(inMat):
    """Invert scale(): map uint8 [0, 255] back to depth values.

    Values that land exactly on MAX_DISTANCE are the clipped "too far /
    unknown" pixels and are pushed to the MAX_FLOAT32 sentinel.
    """
    # Fix: use MIN_DISTANCE instead of the hard-coded 500.0 so this is the
    # true inverse of scale() even if constants.MIN_DISTANCE changes (the
    # sentinel check below only ever fires when the two slopes agree).
    a = (constants.MAX_DISTANCE - constants.MIN_DISTANCE) / 255.0
    b = constants.MIN_DISTANCE
    unscaled = inMat * a + b
    unscaled = unscaled.astype(np.float32)
    unscaled[unscaled == constants.MAX_DISTANCE] = constants.MAX_FLOAT32
    return unscaled
def paintContour(contours, hierarchy, minVal):
    """Return a uint8 mask (same shape as minVal) with the largest
    contour filled white and its large holes painted back to black.

    Only contours above MIN_CONTOUR_AREA are candidates; holes are the
    direct children of the winner above MIN_HOLE_AREA.
    """
    maxArea = 0.;
    c = -1
    # Find the index of the largest sufficiently-big contour.
    for idx, con in enumerate(contours):
        area = cv2.contourArea(con)
        if area > constants.MIN_CONTOUR_AREA:
            c = idx if area > maxArea else c
            maxArea = area if area > maxArea else maxArea
    # Draw only the biggest and its holes
    alpha = np.zeros(minVal.shape, dtype=np.uint8)
    if c >= 0:
        cv2.drawContours(alpha, [contours[c]], 0, 255, -1)
        # hierarchy rows are [next, prev, first_child, parent]; parent == c
        # means this contour is a hole directly inside the winner.
        for index, child in enumerate(hierarchy[0]):
            if child[3] == c:
                if cv2.contourArea(contours[index]) > constants.MIN_HOLE_AREA:
                    #print 'hole', index, c
                    cv2.drawContours(alpha, [contours[index]], 0, 0, -1)
    return alpha
#background subtraction
def subtraction(options, minVal, d):
    """Background-subtract depth frame *d* against background *minVal*.

    Pixels at least DISTANCE_THRESHOLD closer than the background are
    kept as foreground; unknown (0) and background pixels are pushed to
    MAX_FLOAT32. The thresholded result's dominant contour is returned
    as a mask via paintContour(). NOTE: *d* is modified in place.
    """
    # NOTE(review): this immediately discards the caller's options, so every
    # options-gated display branch below is dead code — looks like a
    # debugging leftover; confirm before re-enabling.
    options = None
    if (options and options.get('display')):
        print ('#unknowns ', str(d[d == 0.].size))
    if (options and options.get('display')):
        cv2.imshow('Before subtraction', d / 4500.)
    # Zero depth means "no reading"; treat as infinitely far.
    d[d == 0.] = constants.MAX_FLOAT32
    #cv2.imshow('Before subtraction', d / 4500.)
    # cv2.waitKey(1)
    #foregroundSize = d.size - d[d>=(minVal-50.)].size
    d[d>=(minVal-constants.DISTANCE_THRESHOLD)] = constants.MAX_FLOAT32
    #print count, foregroundSize
    #cv2.imshow('After subtraction', d / 4500.)
    if (options and options.get('display')):
        cv2.imshow('After subtraction', d / 4500.)
    ret, thr = cv2.threshold(scale(d), 200, 255, cv2.THRESH_BINARY_INV)
    #cv2.imshow('Threshold', thr)
    _, contours, hierarchy = cv2.findContours(thr, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_NONE)
    return paintContour(contours, hierarchy, minVal)
def initCamera():
    """Start the RealSense pipeline with depth + color streams.

    Returns (pipeline, depthScale, align) where *align* maps the color
    stream onto the depth stream.
    """
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, constants.FRAME_WIDTH,
                         constants.FRAME_HEIGHT, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, constants.FULL_FRAME_WIDTH,
                         constants.FULL_FRAME_HEIGHT, rs.format.bgr8, 30)
    profile = pipeline.start(config)
    deviceDepth = profile.get_device().first_depth_sensor()
    # deviceDepth.set_option(rs.option.visual_preset, 4) # high density
    # print deviceDepth.get_option_value_description(rs.option.visual_preset, 4)
    deviceDepth.set_option(rs.option.visual_preset, 5) # medium density
    print deviceDepth.get_option_value_description(rs.option.visual_preset, 5)
    depthScale = deviceDepth.get_depth_scale()
    # align color to depth
    align = rs.align(rs.stream.depth)
    return (pipeline, depthScale, align)
def warmUp(pipeline, frames=20):
    """Discard the first *frames* frames so the camera can settle.

    Generalized: the previously hard-coded 20-frame count is now a
    defaulted parameter, keeping existing callers unchanged.
    """
    for _ in range(frames):
        pipeline.wait_for_frames()
def inpaint(options, minVal):
    """Fill unknown background pixels (MAX_FLOAT32) by inpainting.

    The background is scaled to uint8, inpainted (TELEA) over a mask of
    unknown pixels, unscaled back, and only the masked pixels of minVal
    are replaced. minVal is modified in place and returned.
    """
    #inpainting the background
    mask = np.zeros(minVal.shape, dtype=np.uint8)
    mask[minVal == constants.MAX_FLOAT32] = 255
    if (options and options.get('display')):
        cv2.imshow('before', minVal / 4500.)
    # cv2.imshow('before', minVal / 4500.)
    # cv2.waitKey(0)
    if (options and options.get('display')):
        cv2.imshow('mask', mask)
    #cv2.imshow('mask', mask)
    #cv2.waitKey(0)
    scaledMin = scale(minVal)
    scaledPatchedMin = cv2.inpaint(scaledMin,mask,3,cv2.INPAINT_TELEA)
    approxMinVal = unscale(scaledPatchedMin)
    #print (approxMinVal[mask == 255])
    # Only replace the pixels that were unknown; measured values stay.
    minVal[mask == 255] = approxMinVal[mask == 255]
    if (options and options.get('display')):
        cv2.imshow('after', minVal / 4500.)
    # cv2.imshow('after', minVal / 4500.)
    # cv2.waitKey(0)
    return minVal
def computeBackground(options, pipeline, align):
    """Build a per-pixel minimum-depth background over 60 frames.

    Unknown (zero) readings are set to MAX_FLOAT32 so they never win the
    minimum; any pixels still unknown at the end are inpainted.
    """
    count = 0
    minVal = np.full((constants.FRAME_DECIMATED_HEIGHT,
                      constants.FRAME_DECIMATED_WIDTH),
                     constants.MAX_FLOAT32, dtype=np.float32)
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 4)
    temporal = rs.temporal_filter()
    while count < 60:
        frames = pipeline.wait_for_frames()
        alignedFrames = align.process(frames)
        depth = alignedFrames.get_depth_frame()
        # depth = frames.get_depth_frame()
        filtered_depth = decimation.process(depth)
        # filtered_depth = temporal.process(filtered_depth)
        # filtered_depth = depth
        d = np.asanyarray(filtered_depth.get_data()).astype(np.float32)
        print d.shape, d.dtype
        zeros = d.size - np.count_nonzero(d)
        print('Input:zeros:' + str(zeros) + ' total:' + str(d.size))
        # Zero means "no reading"; push to MAX so it cannot become the min.
        d[d == 0.] = constants.MAX_FLOAT32
        minVal = np.minimum(minVal, d)
        print ('Minval: zeros:' +
               str(minVal[minVal == constants.MAX_FLOAT32].size) +
               ' total:' + str(minVal.size))
        count = count + 1
    return inpaint(options, minVal)
class Intrinsic:
    """Plain copy of a camera-intrinsics object.

    Copies the six fields used downstream into ordinary attributes so
    the values can cross a multiprocessing queue.
    """

    def __init__(self, intrinsics):
        for field in ('width', 'height', 'ppx', 'ppy', 'fx', 'fy'):
            setattr(self, field, getattr(intrinsics, field))

    def __repr__(self):
        return "width: {0}, height: {1}, ppx: {2}, ppy: {3}, fx: {4}, fy: {5}" \
            .format(self.width, self.height, self.ppx, self.ppy, self.fx,
                    self.fy)
def readProcess(q):
    """Camera-reader child process: push depth frames onto queue *q*.

    Protocol: first a ['minVal', depthScale, background] item, then a
    stream of ['depth', Intrinsic, frame] items where only the newest
    frame is kept (older queue entries are drained to cut latency).
    Stops on SIGTERM and always exits the whole process at the end
    (librealsense restart workaround, see comments below).
    """
    pipeline, depthScale, align = initCamera()
    counter = 0
    counterOld = 0
    # Mutable flag shared with the signal handler closure.
    status = {'processing': True}

    def handler(signum, fr):
        # SIGTERM -> leave the main loop cleanly.
        print 'Handler called'
        sys.stdout.flush()
        status['processing'] = False

    signal.signal(signal.SIGTERM, handler)
    try:
        warmUp(pipeline)
        minVal = computeBackground(None, pipeline, align)
        q.put(['minVal', depthScale, minVal])
        time.sleep(1) # ensure main thread gets it
        t0 = time.time()
        decimation = rs.decimation_filter()
        decimation.set_option(rs.option.filter_magnitude,
                              constants.DECIMATION_FACTOR)
        temporal = rs.temporal_filter(0.5, 20, 5)#valid 1 of last 2
        while True and status['processing']:
            counter = counter + 1
            frames = pipeline.wait_for_frames()
            if not status['processing']:
                break
            depth = frames.get_depth_frame()
            filtered_depth = decimation.process(depth)
            filtered_depth = temporal.process(filtered_depth)
            # if counter % 2 == 1:
            #help the temporal filter since we cannot process full rate
            # continue
            intrinsics = filtered_depth.profile.as_video_stream_profile().intrinsics
            d = np.asanyarray(filtered_depth.get_data()).astype(np.float32)
            #replace last frame to improve latency
            while not q.empty():
                try:
                    ignore = q.get(False) # ensure it is empty
                except:
                    #ignore a race that empties the queue
                    None
            #queue should be empty by now
            #print('<<>>>', intrinsics)
            # Copy into a picklable wrapper before crossing the queue.
            intr = Intrinsic(intrinsics)
            #print ('<<>>', intr)
            q.put(['depth', intr, d])
            if counter % 120 == 0:
                # Periodic throughput report.
                t1 = time.time()
                print 'S#{:.3f} images/sec'.format((counter-counterOld)/(t1-t0))
                sys.stdout.flush()
                t0 = t1
                counterOld = counter
    finally:
        pipeline.stop()
        q.close()
        q.cancel_join_thread() #brute force quit
        print 'Exiting readProcess'
        # BUG HACK FIX...
        # librealsense does not seem to restart properly without a process exit
        # and I am relying on docker run -restart=always to start it again.
        # Note that this process is stateless and restarts are very fast.
        print ('Exit process 2')
        sys.exit(0)
def pinProcess(pid, affinity):
    """Pin process *pid*, its children and its threads to the CPU cores
    listed in *affinity*."""
    p = psutil.Process(pid)
    # parent = p.parent()
    # p = parent if parent != None else p
    p.cpu_affinity(affinity)
    print ('Set affinity to process {} {}'.format(p.pid, affinity))
    for x in p.children(recursive=True):
        print ('Set affinity to process {} {}'.format(x.pid, affinity))
        x.cpu_affinity(affinity)
    # Threads are addressed by their own ids via fresh Process handles.
    for x in p.threads():
        print ('Set affinity to process {} {}'.format(x.id, affinity))
        newP = psutil.Process(x.id)
        newP.cpu_affinity(affinity)
def mainSegment(options = None):
    """Generator: run the segmentation pipeline, yielding JSON results.

    Spawns readProcess as a child pinned to its own CPU core, consumes
    its frames, background-subtracts each one and runs body-part
    processing, yielding one json.dumps(result) per frame. Always exits
    the whole process in the finally block (librealsense restart
    workaround).
    """
    counter = 0
    rp = None
    if constants.PROFILE_ON:
        pr = cProfile.Profile() #profile
        pr.enable() #profile
    try:
        net = bp.newNet()
        counterOld = counter
        myType = None
        framesQueue = mp.Queue()
        rp = mp.Process(target=readProcess, args=(framesQueue,))
        # Main process on core 1, camera reader on core 2.
        pinProcess(os.getpid(), [1])
        rp.start()
        time.sleep(1) # let rp create all threads before pinning
        pinProcess(rp.pid, [2])
        t0 = time.time()
        # Wait for the one-off background frame first (15 s timeout).
        while myType != 'minVal':
            myType, depthScale, minVal = framesQueue.get(True, 15)
        myType = None
        #print (minVal.shape)
        sys.stdout.flush()
        while True:
            counter = counter + 1
            while myType != 'depth':
                myType, intrinsics, d = framesQueue.get(True, 15)
            myType = None
            #print intrinsics
            alpha = subtraction(options, minVal, d)
            # if (options and options.get('display')):
            #     cv2.imshow('Contour', alpha)
            if constants.PROFILE_ON:
                result = pr.runcall(bp.process, options, net, d, alpha,
                                    depthScale, intrinsics) #profile
            else:
                result = bp.process(options, net, d, alpha, depthScale,
                                    intrinsics)
            if (options and options.get('display')):
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            if counter % 30 == 0:
                # Periodic throughput report.
                t1 = time.time()
                print ('{:.3f} images/sec'.format((counter-counterOld)/(t1-t0)))
                sys.stdout.flush()
                t0 = t1
                counterOld = counter
            yield json.dumps(result)
    except Exception as e:
        print(e)
        traceback.print_exc(file=sys.stdout)
        sys.stdout.flush()
    finally:
        print ('Main Exiting')
        sys.stdout.flush()
        if rp != None:
            rp.terminate()
            rp.join()
        if constants.PROFILE_ON:
            pr.disable()
            pr.print_stats()#profile
        # BUG HACK FIX...
        # librealsense does not seem to restart properly without a process exit
        # and I am relying on docker run -restart=always to start it again.
        # Note that this process is stateless and restarts are very fast.
        print ('Exit parent process')
        sys.exit(0)
def loop(options = None):
    """Run the segmentation generator and print every JSON result."""
    for result in mainSegment(options):
        print (result)
if __name__ == "__main__":
    # NOTE(review): mainSegment() is a generator, so calling it without
    # iterating does no work — loop() is probably what was intended here.
    mainSegment()
    # cProfile.run('mainSegment()') #mainSegment()
|
HackRequests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : w8ay
# @Mail : w8ay@qq.com
# @File : hackRequests.py
import copy
import gzip
import queue
import socket
import ssl
import threading
import time
import zlib
from http import client
from urllib import parse
class HackError(Exception):
    """Exception raised for hackRequests-level failures
    (socket timeouts, DNS errors, user abort)."""

    def __init__(self, content):
        self.content = content

    def __str__(self):
        return self.content
def extract_dict(text, sep, sep2="="):
    """Split *text* into a dict.

    Records are separated by *sep* (typically '\\n'); within a record the
    first occurrence of *sep2* (default '=') separates key from value.
    *text* must be a string of that shape, otherwise this raises.
    """
    pairs = (record.split(sep2, 1) for record in text.split(sep))
    return dict(pairs)
class httpcon(object):
    '''
    httpcon creates the HTTP connections used by hackRequests.
    Plain or TLS connections, optionally tunnelled through a proxy.

    Attributes:
        timeout: socket timeout in seconds.
        protocol: the ssl.PROTOCOL_* constants available at runtime.
    '''

    def __init__(self, timeout=10):
        self.timeout = timeout
        self.protocol = []
        self._get_protocol()

    def _get_protocol(self):
        # Collect whichever PROTOCOL_* constants this ssl build exposes.
        if not self.protocol:
            ps = (
                'PROTOCOL_SSLv23', 'PROTOCOL_TLSv1',
                'PROTOCOL_SSLv2', 'PROTOCOL_TLSv1_1', 'PROTOCOL_TLSv1_2')
            for p in ps:
                pa = getattr(ssl, p, None)
                if pa:
                    self.protocol.append(pa)

    # (Translation of the string below: "Get a connection. This is the most
    # important part of the pool: connection creation and reuse happen here.")
    '''
    得到一个连接
    这是连接池中最重要的一个参数,连接生成、复用相关操作都在这
    '''
    def get_con(self, url, proxy=None):
        """Return a new connection for url=(scheme, host, port, path)."""
        scheme, host, port, path = url
        conn = self._make_con(scheme, host, port, proxy)
        return conn

    def _make_con(self, scheme, host, port, proxy=None):
        # Plain HTTP: direct connection, or CONNECT tunnel via proxy.
        if "https" != scheme:
            if proxy:
                con = client.HTTPConnection(
                    proxy[0], int(proxy[1]), timeout=self.timeout)
                con.set_tunnel(host, port)
            else:
                con = client.HTTPConnection(host, port, timeout=self.timeout)
            # con.connect()
            return con
        # HTTPS: try each available protocol until one succeeds, with
        # certificate verification deliberately disabled.
        for p in self.protocol:
            context = ssl._create_unverified_context(p)
            try:
                if proxy:
                    con = client.HTTPSConnection(
                        proxy[0], proxy[1], context=context,
                        timeout=self.timeout)
                    con.set_tunnel(host, port)
                else:
                    con = client.HTTPSConnection(
                        host, port, context=context, timeout=self.timeout)
                # con.connect()
                return con
            except ssl.SSLError:
                pass
        raise Exception('connect err')
class hackRequests(object):
    '''
    hackRequests is the main HTTP request helper.
    Use http() for URL-based requests or httpraw() to replay a raw HTTP
    request string; both return a response object and keep a
    request/response log.
    '''

    def __init__(self, conpool=None):
        self.lock = threading.Lock()
        if conpool is None:
            self.httpcon = httpcon(timeout=17)
        else:
            self.httpcon = conpool

    def _get_urlinfo(self, url, realhost: str):
        """Split *url* into (scheme, hostname, port, path-with-query).

        realhost ("host[:port]", default port 80) overrides the URL's
        netloc, e.g. to talk to a specific IP.
        """
        p = parse.urlparse(url)
        scheme = p.scheme.lower()
        if scheme != "http" and scheme != "https":
            raise Exception("http/https only")
        hostname = p.netloc
        port = 80 if scheme == "http" else 443
        if ":" in hostname:
            hostname, port = hostname.split(":")
        path = ""
        if p.path:
            path = p.path
        if p.query:
            path = path + "?" + p.query
        if realhost:
            if ":" not in realhost:
                realhost = realhost + ":80"
            hostname, port = realhost.split(":")
        return scheme, hostname, int(port), path

    def _send_output(self, oldfun, con, log):
        # Hook HTTPConnection._send_output so the exact bytes of the
        # outgoing request are captured into log['request']; the original
        # method is restored after the first call.
        def _send_output_hook(*args, **kwargs):
            log['request'] = b"\r\n".join(con._buffer).decode('utf-8')
            oldfun(*args, **kwargs)
            con._send_output = oldfun
        return _send_output_hook

    def httpraw(self, raw: str, **kwargs):
        """Send a raw HTTP request string.

        kwargs: proxy, real_host ("host[:port]"), ssl (bool: use HTTPS),
        location (follow one redirect, default True).
        """
        raw = raw.strip()
        proxy = kwargs.get("proxy", None)
        real_host = kwargs.get("real_host", None)
        ssl = kwargs.get("ssl", False)  # NOTE: shadows the ssl module here
        location = kwargs.get("location", True)
        scheme = 'http'
        port = 80
        if ssl:
            scheme = 'https'
            port = 443
        # Request line: "METHOD /path HTTP/x.y"
        try:
            index = raw.index('\n')
        except ValueError:
            raise Exception("ValueError")
        log = {}
        try:
            method, path, protocol = raw[:index].split(" ")
        except:
            raise Exception("Protocol format error")
        raw = raw[index + 1:]
        # Locate the (required) Host header.
        try:
            host_start = raw.index("Host: ")
            host_end = raw.index('\n', host_start)
        except ValueError:
            raise ValueError("Host headers not found")
        if real_host:
            host = real_host
            if ":" in real_host:
                host, port = real_host.split(":")
        else:
            host = raw[host_start + len("Host: "):host_end]
            if ":" in host:
                host, port = host.split(":")
        # Parse header lines until the first blank line.
        raws = raw.splitlines()
        headers = {}
        # index = 0
        # for r in raws:
        #     raws[index] = r.lstrip()
        #     index += 1
        index = 0
        for r in raws:
            if r == "":
                break
            try:
                k, v = r.split(": ")
            except:
                k = r
                v = ""
            headers[k] = v
            index += 1
        # headers["Connection"] = "close"
        # Everything after the blank line is the body.
        if len(raws) < index + 1:
            body = ''
        else:
            body = '\n'.join(raws[index + 1:]).lstrip()
        urlinfo = scheme, host, int(port), path
        try:
            conn = self.httpcon.get_con(urlinfo, proxy=proxy)
        except:
            raise
        conn._send_output = self._send_output(conn._send_output, conn, log)
        try:
            conn.putrequest(method, path, skip_host=True, skip_accept_encoding=True)
            for k, v in headers.items():
                conn.putheader(k, v)
            # Supply Content-Length when the raw request omitted it.
            if body and "Content-Length" not in headers and "Transfer-Encoding" not in headers:
                length = conn._get_content_length(body, method)
                conn.putheader("Content-Length", length)
            conn.endheaders()
            if body:
                if headers.get("Transfer-Encoding", '').lower() == "chunked":
                    # Normalise newlines and terminate the chunked body.
                    body = body.replace('\r\n', '\n')
                    body = body.replace('\n', '\r\n')
                    body = body + "\r\n" * 2
                log["request"] += "\r\n" + body
                conn.send(body.encode('utf-8'))
            rep = conn.getresponse()
        except socket.timeout:
            raise HackError("socket connect timeout")
        except socket.gaierror:
            raise HackError("socket don't get hostname")
        except KeyboardInterrupt:
            raise HackError("user exit")
        finally:
            conn.close()
        log["response"] = "HTTP/%.1f %d %s" % (
            rep.version * 0.1, rep.status,
            rep.reason) + '\r\n' + str(rep.msg)
        # Rebuild the effective URL (default ports are omitted).
        if port == 80 or port == 443:
            _url = "{scheme}://{host}{path}".format(scheme=scheme, host=host, path=path)
        else:
            _url = "{scheme}://{host}{path}".format(scheme=scheme, host=host + ":" + port, path=path)
        redirect = rep.msg.get('location', None)  # handle 301/302
        if redirect and location:
            if not redirect.startswith('http'):
                redirect = parse.urljoin(_url, redirect)
            # Follow a single redirect via http().
            return self.http(redirect, post=None, method=method, headers=headers, location=True, locationcount=1)
        return response(rep, _url, log, )

    def http(self, url, **kwargs):
        """Send an HTTP request to *url*.

        kwargs: method, post/data, location, locationcount, proxy,
        headers (dict or raw header string), real_host, cookie (str or
        dict), referer, user_agent. Follows up to 10 redirects.
        """
        method = kwargs.get("method", "GET")
        post = kwargs.get("post", None) or kwargs.get("data", None)
        location = kwargs.get('location', True)
        locationcount = kwargs.get("locationcount", 0)
        proxy = kwargs.get('proxy', None)
        headers = kwargs.get('headers', {})
        # real host:ip
        real_host = kwargs.get("real_host", None)
        if isinstance(headers, str):
            headers = extract_dict(headers.strip(), '\n', ': ')
        cookie = kwargs.get("cookie", None)
        if cookie:
            # Accept either a ready cookie string or a dict of pairs.
            cookiestr = cookie
            if isinstance(cookie, dict):
                cookiestr = ""
                for k, v in cookie.items():
                    cookiestr += "{}={}; ".format(k, v)
                cookiestr = cookiestr.strip("; ")
            headers["Cookie"] = cookiestr
        for arg_key, h in [
            ('referer', 'Referer'),
            ('user_agent', 'User-Agent'), ]:
            if kwargs.get(arg_key):
                headers[h] = kwargs.get(arg_key)
        # Content-Length is recomputed by http.client; drop a stale one.
        if "Content-Length" in headers:
            del headers["Content-Length"]
        urlinfo = scheme, host, port, path = self._get_urlinfo(url, real_host)
        log = {}
        try:
            conn = self.httpcon.get_con(urlinfo, proxy=proxy)
        except:
            raise
        conn._send_output = self._send_output(conn._send_output, conn, log)
        tmp_headers = copy.deepcopy(headers)
        if post:
            method = "POST"
            # Normalise the body: "a=1&b=2" string -> dict -> urlencoded.
            if isinstance(post, str):
                try:
                    post = extract_dict(post, sep="&")
                except:
                    pass
            try:
                post = parse.urlencode(post)
            except:
                pass
            tmp_headers["Content-Type"] = kwargs.get(
                "Content-type", "application/x-www-form-urlencoded")
        tmp_headers["Accept"] = tmp_headers.get("Accept", "*/*")
        tmp_headers['Accept-Encoding'] = tmp_headers.get("Accept-Encoding", "gzip, deflate")
        tmp_headers['Connection'] = 'close'
        tmp_headers['User-Agent'] = tmp_headers['User-Agent'] if tmp_headers.get(
            'User-Agent') else 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36'
        try:
            conn.request(method, path, post, tmp_headers)
            rep = conn.getresponse()
            # body = rep.read()
        except socket.timeout:
            raise HackError("socket connect timeout")
        except socket.gaierror:
            raise HackError("socket don't get hostname")
        except KeyboardInterrupt:
            raise HackError("user exit")
        finally:
            conn.close()
        if post:
            log["request"] += "\r\n\r\n" + post
        log["response"] = "HTTP/%.1f %d %s" % (
            rep.version * 0.1, rep.status,
            rep.reason) + '\r\n' + str(rep.msg)
        redirect = rep.msg.get('location', None)  # handle 301/302
        if redirect and location and locationcount < 10:
            if not redirect.startswith('http'):
                redirect = parse.urljoin(url, redirect)
            return self.http(redirect, post=None, method=method, headers=tmp_headers, location=True,
                             locationcount=locationcount + 1)
        if not redirect:
            redirect = url
        log["url"] = redirect
        return response(rep, redirect, log, cookie)
class response(object):
    """Wrapper around an http.client response.

    Exposes status_code, url, headers (dict), header (raw string),
    cookie/cookies (merged with the request's cookies), log, charset,
    and lazy content()/text() body accessors.
    """

    def __init__(self, rep, redirect, log, oldcookie=''):
        self.rep = rep
        self.status_code = self.rep.status  # response code
        self.url = redirect
        self._content = b''
        _header_dict = dict()
        self.cookie = ""
        for k, v in self.rep.getheaders():
            _header_dict[k] = v
            # handle cookie
            if k == "Set-Cookie":
                # Keep only the name=value part, dropping attributes.
                if ";" in v:
                    self.cookie += v.strip().split(";")[0] + "; "
                else:
                    self.cookie = v.strip() + "; "
        if oldcookie:
            # Merge cookies sent with the request into the new set.
            cookie_dict = self._cookie_update(oldcookie, self.cookie)
            self.cookie = ""
            for k, v in cookie_dict.items():
                self.cookie += "{}={}; ".format(k, v)
        self.cookie = self.cookie.rstrip("; ")
        try:
            self.cookies = extract_dict(self.cookie, "; ", "=")
        except:
            self.cookies = {}
        self.headers = _header_dict
        self.header = str(self.rep.msg)  # response header
        self.log = log
        # Charset from the Content-Type header, defaulting to utf-8.
        charset = self.rep.msg.get('content-type', 'utf-8')
        try:
            self.charset = charset.split("charset=")[1]
        except:
            self.charset = "utf-8"

    def content(self):
        """Read and cache the (decompressed) response body as bytes."""
        if self._content:
            return self._content
        encode = self.rep.msg.get('content-encoding', None)
        try:
            body = self.rep.read()
        except socket.timeout:
            body = b''
        if encode == 'gzip':
            body = gzip.decompress(body)
        elif encode == 'deflate':
            # Raw deflate first, then zlib-wrapped as a fallback.
            try:
                body = zlib.decompress(body, -zlib.MAX_WBITS)
            except:
                body = zlib.decompress(body)
        # redirect = self.rep.msg.get('location', None)  # handle 301/302
        self._content = body
        return body

    def text(self):
        '''
        Return the body decoded with the detected charset.
        Side effect: appends up to 4096 chars of text to log['response'].
        :return: text
        '''
        body = self.content()
        try:
            text = body.decode(self.charset, 'ignore')
        except:
            text = str(body)
        self.log["response"] += '\r\n' + text[:4096]
        return text

    def _cookie_update(self, old, new):
        '''
        Merge the old cookie string with the new one and return the
        combined set as a dict.
        :param old: old cookie string
        :param new: new cookie string
        :return: dict of merged cookies (new values win)
        '''
        # Parse the old cookie first, then let the new one overwrite it.
        old_sep = old.strip().split(";")
        new_sep = new.strip().split(";")
        cookie_dict = {}
        for sep in old_sep:
            if sep == "":
                continue
            try:
                k, v = sep.split("=")
                cookie_dict[k.strip()] = v
            except:
                continue
        for sep in new_sep:
            if sep == "":
                continue
            try:
                k, v = sep.split("=")
                cookie_dict[k.strip()] = v
            except:
                continue
        return cookie_dict
class threadpool:
    """Fixed-size worker pool that executes queued hackRequests jobs.

    Jobs are queued via http()/httpraw(); each finished result is handed to
    the user-supplied callback from a worker thread.
    """

    def __init__(self, threadnum, callback, timeout=10):
        # thread_count counts live workers; thread_nums is the configured size.
        self.thread_count = self.thread_nums = threadnum
        self.queue = queue.Queue()
        con = httpcon(timeout=timeout)
        self.hack = hackRequests(con)
        self.isContinue = True
        self.thread_count_lock = threading.Lock()
        self._callback = callback

    def push(self, payload):
        """Queue a pre-built job payload dict."""
        self.queue.put(payload)

    def changeThreadCount(self, num):
        """Atomically adjust the live-worker counter by num (may be negative)."""
        # Fixed: use the lock as a context manager so it is always released,
        # even if the increment raises.
        with self.thread_count_lock:
            self.thread_count += num

    def stop(self):
        """Ask all workers to exit after their current job."""
        self.isContinue = False

    def run(self):
        """Start the workers and block until they all finish (Ctrl-C aborts)."""
        th = []
        for i in range(self.thread_nums):
            t = threading.Thread(target=self.scan)
            # Fixed: assign the daemon attribute instead of the deprecated
            # setDaemon() call.
            t.daemon = True
            t.start()
            th.append(t)
        # It can quit with Ctrl-C
        try:
            while 1:
                if self.thread_count > 0 and self.isContinue:
                    time.sleep(0.01)
                else:
                    break
        except KeyboardInterrupt:
            exit("User Quit")

    def http(self, url, **kwargs):
        """Queue a regular URL-based request job."""
        func = self.hack.http
        self.queue.put({"func": func, "url": url, "kw": kwargs})

    def httpraw(self, raw: str, ssl: bool = False, proxy=None, location=True):
        """Queue a raw-request job (full request text supplied by caller)."""
        func = self.hack.httpraw
        self.queue.put({"func": func, "raw": raw, "ssl": ssl,
                        "proxy": proxy, "location": location})

    def scan(self):
        """Worker loop: pop and run jobs until the queue drains or stop()."""
        while 1:
            if self.queue.qsize() > 0 and self.isContinue:
                p = self.queue.get()
            else:
                break
            func = p.pop("func")
            url = p.get("url", None)
            try:
                if url is None:
                    # Raw job: remaining dict entries are keyword arguments.
                    raw = p.pop('raw')
                    h = func(raw, **p)
                else:
                    h = func(url, **p.get("kw"))
                self._callback(h)
            except Exception as e:
                print(url, e)
        # Worker is exiting: decrement the live-worker counter exactly once.
        self.changeThreadCount(-1)
def http(url, **kwargs):
    """One-shot convenience wrapper: run a single request on a fresh
    hackRequests instance and return its response."""
    return hackRequests().http(url, **kwargs)
def httpraw(raw: str, **kwargs):
    """One-shot convenience wrapper: send a raw request through a fresh
    hackRequests instance and return its response."""
    return hackRequests().httpraw(raw, **kwargs)
# Module is intended to be imported; nothing runs when executed directly.
if __name__ == '__main__':
    pass
|
email_cron.py | import threading
import time
import sys
import utils.logs as logs
import time
from utils.sendemail import send_email
from utils.config import get_value
from utils.db import Database_update
from API.scanstatus import scan_status
email_db = Database_update()
emails = send_email()
def send_email_notification():
    """Background worker: poll the email collection and send a notification
    for every scan that has completed but not yet been notified.

    Runs forever; sleeps `email_schedule` seconds between polls. Exits the
    process if the schedule value is invalid or polling fails.
    """
    # Give the rest of the application time to start before the first poll.
    time.sleep(20)
    while True:
        try:
            schedule = get_value('config.property','SMTP','email_schedule')
            records = email_db.db.email.find({})
            for data in records:
                notification = data['email_notification']
                scan_id = data['scanid']
                scan_result = scan_status(scan_id)
                if notification == 'N' and scan_result == 'Completed':
                    try:
                        email = data['to_email']
                        email_result = emails.main_email(scan_id,email)
                        if email_result is False:
                            # Fixed: Python-2-only `print` statement replaced
                            # with the function form (valid on both 2 and 3).
                            print("failed to connect to SMTP server")
                            return
                        # Mark the record as notified so it is not re-sent.
                        email_db.db.email.update({'email_notification' : 'N'},{"$set" : {'email_notification' : 'Y'}})
                    except Exception:
                        # Best-effort per record: narrowed from a bare except
                        # so SystemExit/KeyboardInterrupt still propagate.
                        pass
            time.sleep(int(schedule))
        except Exception as e:
            logs.logging.info("Invalid email schedule argument "+str(e))
            sys.exit(1)
def email_start_cron():
    """Start the e-mail notification worker thread if enabled in config."""
    email_notification = get_value('config.property','SMTP','email_notification')
    if email_notification == "y" or email_notification == "Y":
        # Start the thread
        t = threading.Thread(target=send_email_notification)
        # Fixed: attribute was misspelled `deamon`, which silently created an
        # unused attribute — the worker thread was never actually daemonized
        # and could keep the process alive on shutdown.
        t.daemon = True
        t.start()
        logs.logging.info("started")
    else:
        logs.logging.info("Email notification is not enabled")
threaded_car_detection.py | import os
import cv2
import time
import glob
import threading
import skimage
import numpy as np
from car_detection.helpers import ParkedCarDetector
from .gps import GPS
# If there is no maximum capacity stored we set it to this
default_max_cap = 8
class ThreadedCarDetection:
    """Runs parked-car detection on newly captured images in a background thread.

    Watches the raw-image directory for the newest file, masks it, runs the
    ParkedCarDetector, stores an annotated result image plus counts/GPS data
    in the database, and prunes old files.
    """

    def __init__(self, database, default_lat_lon, max_file_count):
        """
        :param database: project DB facade (paths, status flags, image table)
        :param default_lat_lon: (lat, lon) fallback when no GPS fix is stored
        :param max_file_count: processed-image retention limit
        """
        # Configuration
        self.db = database
        self.default_lat_lon = default_lat_lon
        self.max_file_count = max_file_count
        # Detection mask and temporary images
        self.process_image = None
        self.mask_image = None
        self.mask_ref_image = None
        # The detector (created lazily on the worker thread)
        self.detector = None
        self.prev_recent_filename = None
        self.gps = GPS()
        # Thread control
        self.kill_event = threading.Event()
        self.on_calc_finished = threading.Event()
        self.processing_thread = None

    def load_mask_image(self, image_shape):
        """Load mask and mask-reference images, or create all-white defaults
        of the given shape if the files do not exist.

        NOTE(review): the file imports only `import skimage`; `skimage.io`
        may require an explicit `import skimage.io` on some versions — confirm.
        """
        if os.path.isfile(self.db.mask_image_path):
            self.mask_image = skimage.io.imread(self.db.mask_image_path)
        else:
            # All-white mask == no masking.
            self.mask_image = np.full(
                image_shape,
                (255, 255, 255),
                np.uint8)
        if os.path.isfile(self.db.mask_ref_image_path):
            self.mask_ref_image = skimage.io.imread(self.db.mask_ref_image_path)
        else:
            self.mask_ref_image = np.full(
                image_shape,
                (255, 255, 255),
                np.uint8)

    # Start up car detection (image processing) thread
    def start_processing(self):
        """Spawn the worker thread running processing_function()."""
        self.processing_thread = threading.Thread(target=self.processing_function)
        self.processing_thread.start()

    def stop_processing(self):
        """Signal the worker to stop and wait up to 120 s for it to exit."""
        self.kill_event.set()
        self.processing_thread.join(120)

    def get_new_image(self):
        """Return the newest raw image not yet processed, or None.

        Files are scanned newest-first; unreadable files (typically still
        being written) are skipped.
        """
        search_text = self.db.raw_image_dir + "*" + self.db.image_file_ext
        files = glob.glob(search_text)
        files.sort(reverse=True)
        index = 0
        while True:
            if index >= len(files) or files[index] == self.prev_recent_filename:
                return None
            try:
                ret_image = skimage.io.imread(files[index])
                self.prev_recent_filename = files[index]
                return ret_image
            except Exception as e:
                # don't show this: it looks alarming, but is quite normal as the
                # image is probably halfway through being written to
                # print("Failed to load raw image file: {} : {}".format(files[index], e.args))
                index += 1

    def prune_files_and_db(self):
        """Delete oldest processed images and DB rows beyond max_file_count."""
        # prune the processed image files
        search_text = self.db.processed_image_dir + "*" + self.db.image_file_ext
        files = glob.glob(search_text)
        files.sort()
        num_files_to_remove = max(0, len(files) - self.max_file_count)
        for i in range(0, num_files_to_remove):
            os.remove(files[i])
        # prune the database
        self.db.prune_image_table(self.max_file_count)

    def processing_function(self):
        """Worker-thread main loop: detect cars on each new image until killed."""
        print("Starting: Car detection thread")
        max_cap = self.db.get_max_capacity()
        if max_cap is None:
            self.db.save_max_capacity(default_max_cap)
        self.detector = ParkedCarDetector()
        self.detector.enable_flicker_removal = False
        image_count = 0
        time.sleep(2)
        last_time = time.time()
        while not self.kill_event.wait(0):
            if self.db.get_system_status("detection") == "paused":
                print("Detection is paused")
                time.sleep(1)
                continue
            # get GPS data if we have it
            lat, lon = self.gps.GetValues()
            if lat is not None and lon is not None:
                self.db.save_lat_lon(lat, lon)
                self.db.set_system_status("gps_source", "GPS Unit")
            # Retrieve latest value from database
            lat, lon = self.db.get_lat_lon()
            if (lat is None and lon is None) or self.db.get_system_status("gps_source") == "Command line":
                lat, lon = self.default_lat_lon
                self.db.save_lat_lon(lat, lon)
                self.db.set_system_status("gps_source", "Command line")
            this_time = time.time()
            print("waiting for new image to be available: " + time.strftime('%Mm %Ss', time.gmtime(this_time - last_time)))
            image = self.get_new_image()
            # save this one out immediately in case it causes a crash — we will have a record of it
            if image is not None:
                print("New image available - detecting vehicles...")
                # Find contours in the mask image so we can just focus in on that area
                self.load_mask_image(image.shape)
                greyscale_mask = cv2.cvtColor(self.mask_image, cv2.COLOR_RGB2GRAY)
                image_right = greyscale_mask.shape[1] - 1
                image_top = greyscale_mask.shape[0] - 1
                # Draw a black border so contours touching the edge are closed.
                cv2.line(greyscale_mask, (0, 0), (0, image_top), (0, 0, 0), 1)
                cv2.line(greyscale_mask, (0, image_top), (image_right, image_top), (0, 0, 0), 1)
                cv2.line(greyscale_mask, (image_right, image_top), (image_right, 0), (0, 0, 0), 1)
                cv2.line(greyscale_mask, (image_right, 0), (0, 0), (0, 0, 0), 1)
                # findContours returns 2 values on OpenCV 4.x, 3 on 3.x.
                result = cv2.findContours(greyscale_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                contours, hierarchy = result if len(result) == 2 else result[1:3]
                # Bounding box of all mask contours (in image coordinates).
                left = greyscale_mask.shape[1]
                bottom = greyscale_mask.shape[0]
                right = 0
                top = 0
                for cnt in contours:
                    x, y, w, h = cv2.boundingRect(cnt)
                    left = min(left, x)
                    right = max(right, x + w)
                    bottom = min(bottom, y)
                    top = max(top, y + h)
                # If we don't have a masked area at all or it is too small, then just process the whole image
                if right < left or top < bottom:
                    left = 0
                    bottom = 0
                    right = greyscale_mask.shape[1]
                    top = greyscale_mask.shape[0]
                # Crop to the masked region and apply the mask multiplicatively.
                self.process_image = np.zeros((top-bottom, right-left, 3), np.uint8)
                zoom_image = image[bottom:top, left:right]
                zoom_image_mask = self.mask_image[bottom:top, left:right]
                cv2.multiply(zoom_image, zoom_image_mask, self.process_image, 1 / 255.0)
                # Do detections and visualise
                start_detect_time = time.time()
                results = self.detector.detect_cars(self.process_image)
                end_detect_time = time.time()
                print("Detection complete. Computation duration: " + format(time.strftime('%Mm %Ss', time.gmtime(end_detect_time - start_detect_time))))
                # Visualise detections on a greyscale copy of the input.
                bwImage = cv2.cvtColor(self.process_image, cv2.COLOR_RGB2GRAY)
                bwImage = cv2.cvtColor(bwImage, cv2.COLOR_GRAY2RGB)
                out_image = self.detector.image_visualise(bwImage, results)
                print("Visualisation complete")
                processed_filename = self.db.generate_processed_from_raw_path(self.prev_recent_filename)
                skimage.io.imsave(processed_filename, out_image)
                # Add images and data to database
                total_count = len(results['class_ids'])
                moving_count = sum(self.detector.moving) if self.detector.enable_flicker_removal else 0
                free_spaces = self.db.get_max_capacity() - total_count
                self.db.add_entry_no_save(
                    self.prev_recent_filename,
                    processed_filename,
                    total_count,
                    moving_count,
                    free_spaces,
                    lat,
                    lon)
                self.prune_files_and_db()
                image_count += 1
                # Let any waiters know a detection cycle just finished.
                self.on_calc_finished.set()
                # Reset the "waiting for new image" timer.
                last_time = time.time()
            time.sleep(1)
|
webSocket2.py | import websocket
import json
import time
import threading
class WebsocketClient(object):
    """Thin auto-reconnecting wrapper around websocket.WebSocketApp.

    Received messages are printed and forwarded to an optional callback;
    run() blocks forever, re-running run_forever() whenever the connection
    drops.
    """

    def __init__(self, address, message_callback=None):
        """
        :param address: websocket URL to connect to
        :param message_callback: optional callable invoked with each message
        """
        super(WebsocketClient, self).__init__()
        self.address = address
        self.message_callback = message_callback

    def on_message(self, ws, message):
        """Print the incoming message and forward it to the callback."""
        # message = json.loads(message)
        print("on_client_message:", message)
        if self.message_callback:
            self.message_callback(message)

    def on_error(self, ws, error):
        print("client error:", error)

    def on_close(self, ws):
        """Mark the client as stopped so run() reconnects."""
        print("### client closed ###")
        self.ws.close()
        self.is_running = False

    def on_open(self, ws):
        self.is_running = True
        print("on open")

    def close_connect(self):
        """Explicitly close the underlying websocket."""
        self.ws.close()

    def send_message(self, message):
        """Best-effort send; drops the message if the socket is unusable."""
        try:
            self.ws.send(message)
        except Exception:
            # Fixed: was `except BaseException: pass`, which also swallowed
            # KeyboardInterrupt and SystemExit. Still deliberately best-effort.
            pass

    def run(self):
        """Connect and loop forever, reconnecting whenever the socket closes."""
        websocket.enableTrace(True)
        self.ws = websocket.WebSocketApp(self.address,
                                         on_message=lambda ws, message: self.on_message(ws, message),
                                         on_error=lambda ws, error: self.on_error(ws, error),
                                         on_close=lambda ws: self.on_close(ws))
        self.ws.on_open = lambda ws: self.on_open(ws)
        self.is_running = False
        while True:
            print(self.is_running)
            if not self.is_running:
                # Blocks until the connection drops, then retry after 3 s.
                self.ws.run_forever()
            time.sleep(3)
class WSClient(object):
    """Runs a WebsocketClient on a background thread and proxies sends to it."""

    def __init__(self, address, call_back):
        super(WSClient, self).__init__()
        self.client = WebsocketClient(address, call_back)
        self.client_thread = None

    def run(self):
        """Start the client loop on a dedicated thread."""
        worker = threading.Thread(target=self.run_client)
        self.client_thread = worker
        worker.start()

    def run_client(self):
        """Thread target: hand control to the wrapped client's loop."""
        self.client.run()

    def send_message(self, message):
        """Forward a message to the wrapped client."""
        self.client.send_message(message)
# Demo: connect to the Binance public kline stream, wait for the connection
# to come up, then subscribe to the 1-minute BTCUSDT candles.
uri = "wss://stream.binance.com:9443/ws/BTCUSDT@kline_1m"
ws_client = WSClient(uri, lambda message: print("call_back message:", message))
ws_client.run()
# Crude startup wait; if the connection is slower than 3 s the subscribe
# message is silently dropped by send_message's best-effort semantics.
time.sleep(3)
data = {
    "method": "SUBSCRIBE",
    "params":
    [
        "btcusdt@kline_1m"
    ],
    "id": 1
}
ws_client.send_message(json.dumps(data, indent=4))
|
testCLSimServer.py | #!/usr/bin/env python
import sys
from icecube import clsim, icetray, dataclasses
# skip out if I3CLSimServer was not built
try:
clsim.I3CLSimServer
except AttributeError:
sys.exit(0)
import time
import tempfile
from multiprocessing import Process
try:
# PY2
from Queue import Queue
except ImportError:
# PY3
from queue import Queue
from numpy.random import uniform
from numpy.random import seed
seed(0)
from numpy import testing
icetray.logging.set_level('INFO')
def dummy_photon(step):
    """Build a fake I3CLSimPhoton mirroring the given step's kinematics.

    The scatter count and DOM identifiers are fixed marker values that the
    test asserts on later.
    """
    photon = clsim.I3CLSimPhoton()
    for field in ('x', 'y', 'z', 'theta', 'phi', 'time', 'weight', 'id'):
        setattr(photon, field, getattr(step, field))
    photon.numScatters = 3
    photon.omID = 52
    photon.stringID = 23
    return photon
def dummy_photon_history(photon):
    """Deterministic scatter history: one synthetic position per recorded scatter."""
    history = clsim.I3CLSimPhotonHistory()
    for idx in range(photon.numScatters):
        position = dataclasses.I3Position(idx, idx + 0.5, idx + 3.14)
        history.append(position, idx)
    return history
class DummyConverter(clsim.I3CLSimStepToPhotonConverter):
    """Pure-Python stand-in for an OpenCL propagator: echoes each step bunch
    back as dummy photons so the server/client plumbing can be tested."""

    def __init__(self):
        super(DummyConverter, self).__init__()
        # FIFO of (steps, id) bunches awaiting "conversion".
        self.input_queue = Queue()

    def IsInitialized(self):
        return True

    def GetWorkgroupSize(self):
        return 8

    def GetMaxNumWorkitems(self):
        return 64

    def MorePhotonsAvailable(self):
        return not self.input_queue.empty()

    def EnqueueSteps(self, steps, id):
        # ensure that EnqueueSteps blocks to test backpressure
        time.sleep(0.1)
        self.input_queue.put((steps, id))

    def GetConversionResult(self):
        """Pop one bunch and return a ConversionResult with one dummy photon
        (plus history) per step that has num > 0."""
        steps, id = self.input_queue.get()
        icetray.logging.log_debug('{} steps in bunch {}'.format(len(steps), id), unit='clsim.testCLSimServer')
        photons = clsim.I3CLSimPhotonSeries([dummy_photon(step) for step in steps if step.num > 0])
        history = clsim.I3CLSimPhotonHistorySeries(map(dummy_photon_history, photons))
        return clsim.I3CLSimStepToPhotonConverter.ConversionResult_t(id, photons, history)
def test_client(client, num_steps, base=0):
    """Feed random step bunches through *client* and verify the echoed photons.

    Feeding happens on the calling thread while a second thread drains the
    results (EnqueueSteps may block for backpressure, so the two must overlap).

    :param client: converter or I3CLSimClient connected to the server
    :param num_steps: number of bunches to submit
    :param base: bunch-id offset so parallel clients don't collide
    """
    from threading import Thread
    # bunch id -> the step series we submitted, for later comparison.
    input_steps = dict()

    def feed():
        """Submit num_steps randomly-sized bunches, then a barrier if supported."""
        for i in range(base, base+num_steps):
            steps = clsim.I3CLSimStepSeries()
            # NOTE(review): `uniform(10*num_steps/10.)` is just uniform(num_steps);
            # possibly meant uniform(10, num_steps/10.) — confirm intent.
            for _ in range(int(uniform(10*num_steps/10.))+1):
                steps.append(clsim.I3CLSimStep())
                steps[-1].pos = dataclasses.I3Position(*uniform(size=3))
                steps[-1].dir = dataclasses.I3Direction(*uniform(size=3))
                steps[-1].time = uniform()
                steps[-1].weight = uniform()
                steps[-1].num = 1
            input_steps[i] = steps
            icetray.logging.log_debug("submitting bunch {}/{} with size {}".format(i+1,num_steps,len(steps)), unit='clsim.testCLSimServer')
            client.EnqueueSteps(steps, i)
        if hasattr(client, 'EnqueueBarrier'):
            client.EnqueueBarrier()

    def drain():
        """Collect num_steps results and check photons against submitted steps."""
        for i in range(num_steps):
            icetray.logging.log_debug("getting bunch {}/{}".format(i+1,num_steps), unit='clsim.testCLSimServer')
            if hasattr(client, 'GetConversionResultWithBarrierInfo'):
                result, barrier = client.GetConversionResultWithBarrierInfo()
            else:
                # Plain converters have no barrier; treat the last bunch as one.
                result, barrier = client.GetConversionResult(), i == num_steps-1
            icetray.logging.log_debug("got bunch id {}: {} photons (barrier: {})".format(result.identifier, len(result.photons), barrier), unit='clsim.testCLSimServer')
            input = input_steps[result.identifier]
            try:
                assert len(result.photons) == len(input)
            except AssertionError:
                icetray.logging.log_error("{} != {} in bunch {}".format(len(result.photons), len(input), result.identifier), unit='clsim.testCLSimServer')
                raise
            assert len(result.photonHistories) == len(input)
            for step, photon, history in zip(input, result.photons, result.photonHistories):
                # Marker values set by dummy_photon().
                testing.assert_equal( photon.numScatters, 3 )
                testing.assert_equal( photon.omID, 52 )
                testing.assert_equal( photon.stringID, 23 )
                for attr in 'x', 'y', 'z', 'theta', 'phi', 'time', 'weight':
                    testing.assert_equal( getattr(step, attr), getattr(photon, attr), err_msg='{} not equal'.format(attr))
                dummy_history = dummy_photon_history(photon)
                testing.assert_equal( len(dummy_history), len(history) )
                for pos, expected_pos in zip(history, dummy_history):
                    testing.assert_equal( pos, expected_pos )
            # Only the very last result may carry the barrier flag.
            if i == num_steps-1:
                assert barrier
            else:
                assert not barrier

    # drain in a thread, since EnqueueSteps() may block
    t = Thread(target=drain)
    t.start()
    feed()
    t.join()
    icetray.logging.log_info("base {} done".format(base), unit='clsim.testCLSimServer')
# icetray.logging.set_level_for_unit('I3CLSimServer', 'TRACE')
# icetray.logging.set_level_for_unit('I3CLSimServer', 'DEBUG')
icetray.logging.set_level_for_unit('I3CLSimClient', 'TRACE')
icetray.logging.set_level_for_unit('clsim.testCLSimServer', 'TRACE')

# First, ensure that the test passes when the converter is called directly
test_client(DummyConverter(), 10)

# Now, call through the server in a separate process.
# Binding to port '*' lets the OS pick a free port; GetAddress() reports it.
converters = clsim.I3CLSimStepToPhotonConverterSeries([DummyConverter()])
server = clsim.I3CLSimServer('tcp://127.0.0.1:*',converters)
address = server.GetAddress()
def fire_a_few(num_steps=10, base=0):
    """Child-process entry point: connect a client to the server and run
    test_client with a per-process bunch-id offset."""
    # NB: the Python logging bridge deadlocks from secondary threads in Py3
    if sys.version_info.major == 2:
        icetray.logging.BASIC_FORMAT = "{} %(filename)s:%(lineno)s %(levelname)s: %(message)s".format(base)
        icetray.logging.console()
        icetray.logging.set_level_for_unit('clsim.testCLSimServer', 'TRACE')
        icetray.logging.set_level_for_unit('I3CLSimClient', 'TRACE')
    icetray.logging.log_debug("client {} connecting to {}".format(base, address), unit='clsim.testCLSimServer')
    client = clsim.I3CLSimClient(address)
    icetray.logging.log_debug("client {} connected".format(base), unit='clsim.testCLSimServer')
    # Values advertised by DummyConverter through the server.
    testing.assert_equal( client.workgroupSize, 8 )
    testing.assert_equal( client.maxNumWorkitems, 64 )
    test_client(client, num_steps, base)
# Hammer the server from 10 concurrent client processes, each using a
# disjoint bunch-id range (base = 10*i) so results can't be confused.
procs = [Process(target=fire_a_few, kwargs=dict(num_steps=10, base=10*i)) for i in range(10)]
for p in procs:
    p.start()
for p in procs:
    p.join()
    icetray.logging.log_info("process {} exited with status {}".format(p.pid, p.exitcode))
    assert p.exitcode == 0
icetray.logging.log_info("going to destroy")
del server
icetray.logging.log_info("destroyed")
|
encrypter.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Matteo Bernardini
# Copyright (c) 2015 Marco Bonelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, atexit, signal, threading, handle_args, tools
from getpass import getpass
from os.path import getsize
from datetime import timedelta
from time import time, sleep
CHUNK_SIZE = 20*2**10
CHUNK_N = 0
VERBOSE = False
QUIET = False
INTER_MODE = False
RUNNING = False
ABORTED = False
PROC = None
STATUS = {"coded_bytes":0,"progress":"N/A","time_left":"N/A"}
data_in = sys.stdin
data_out = sys.stdout
msg_out = sys.stderr
start_time = None
end_time = None
# INTERNAL HANDLERS #
def _log(stuff, exit_status=None, pref="\r"):
    """Write a prefixed, console-width-padded message to msg_out.

    Python 2 module (uses xrange). Each line is padded to the console width so
    it fully overwrites any progress line previously printed with '\\r'.

    :param stuff: message text (may contain newlines)
    :param exit_status: if not None, stop the worker flag and sys.exit() with it
    :param pref: prefix written before the message (default carriage return)
    """
    global RUNNING
    stuff = "MBencrypter: "+stuff
    stuff = stuff.split("\n")
    w = tools.console_size()[0]
    for i in xrange(len(stuff)):
        # Pad to console width to blank out the progress line.
        stuff[i] = stuff[i]+(" "*(w-len(stuff[i])))
    msg_out.write(pref+"\n".join(stuff)+"\n")
    msg_out.flush()
    if exit_status!=None:
        RUNNING = False
        sys.exit(exit_status)
def update_status(n=None,f=None):
    """Recompute progress/ETA in STATUS; optionally dump the whole status.

    Also registered as the SIGUSR1 handler, in which case `n` is the signal
    number and `f` the frame — any truthy `n` triggers the status dump.
    """
    global STATUS
    if 'all_bytes' in STATUS:
        STATUS["progress"] = "{0:.0%}".format(STATUS['coded_bytes']/float(STATUS['all_bytes']))
        if start_time and STATUS['coded_bytes'] > 0:
            # Simple linear extrapolation from average throughput so far.
            velocity = STATUS['coded_bytes']/(time()-start_time)
            bytes_left = STATUS['all_bytes']-STATUS['coded_bytes']
            STATUS["time_left"] = str(timedelta(seconds=bytes_left/velocity)).split(".")[0]
    if n:
        out = []
        for key in STATUS:
            out.append("{0}: {1}".format(key, STATUS[key]))
        _log("STATUS:\n"+"\n".join(out))
def show_progress():
    """Progress-thread target: redraw the progress line every 0.5 s while RUNNING."""
    while RUNNING:
        update_status()
        msg_out.write(("\r" if not VERBOSE else "")+("MBencrypter:" if not INTER_MODE else ">>")+" PROGRESS: ")
        if "all_bytes" in STATUS:
            msg_out.write("{0:>5} -- Estimated time left: {1}".format(STATUS["progress"], STATUS["time_left"]))
        else:
            # Total size unknown (stdin input) — no percentage possible.
            msg_out.write("N/A")
        msg_out.flush()
        sleep(0.5)
def _clean():
    """atexit hook: close both data streams on interpreter exit."""
    for stream in (data_in, data_out):
        stream.close()
# Dump the status block on SIGUSR1 (signal not available on Windows).
if hasattr(signal, "SIGUSR1"): signal.signal(signal.SIGUSR1, update_status)
# Always close the data streams when the interpreter exits.
atexit.register(_clean)
####################################################
def _encrypter(do_encrypt, inp, out, key, hex_mode):
    """Core XOR/bit-swap cipher loop (Python 2 module: xrange, .next(), str bytes).

    Reads CHUNK_SIZE-sized chunks from data_in (file or stdin), transforms each
    byte with a key-derived XOR plus paired-bit swaps, and writes to data_out.

    :param do_encrypt: True to encrypt, False to decrypt (inverse bit order)
    :param inp: input file path, or falsy to use the already-open data_in
    :param out: output path; True derives it from inp (.mbc suffix)
    :param key: passphrase; prompted for interactively if empty
    :param hex_mode: input/output is hex text rather than raw bytes
    """
    ## INIZIALIZATION ##
    global data_in, data_out, start_time, end_time, STATUS, RUNNING, CHUNK_N
    if inp:
        try:
            data_in = open(inp,"rb")
            # In hex mode two input characters encode one byte.
            STATUS['all_bytes'] = (getsize(inp)/2) if hex_mode else getsize(inp)
        except IOError:
            _log("ERROR: can't open input file! Aborting.", 1)
    if out and (inp or out!=True):
        try:
            # out==True means "derive from the input name": append .mbc when
            # encrypting, strip the 4-char suffix when decrypting.
            data_out = open((inp+".mbc" if do_encrypt else inp[:-4]) if out==True else out,"wb")
        except IOError:
            _log("ERROR: can't open output file! Aborting.", 1)
    if not key:
        while not key:
            key = getpass("Please type key:\n> ")
    start_time = time()
    RUNNING = True
    if PROC: PROC.start()
    ## OCT KEY SETTING ##
    # Octal digits of each key character drive the bit-swap positions below.
    octKey = []
    for c in key:
        octKey += map(int, oct(ord(c))[1:])
    if len(octKey)%2: octKey.append(0)
    while True:
        try:
            chunk = data_in.read(CHUNK_SIZE)
            if VERBOSE: _log("debug: read chunk #{0} of {1} bytes. Bytes read = {2}".format(CHUNK_N, CHUNK_SIZE, len(chunk)))
            CHUNK_N += 1
            if not chunk:
                if VERBOSE: _log("\x07debug: breaking from while loop!")
                break
            if hex_mode and not do_encrypt:
                # Read a second chunk so hex pairs are never split mid-byte.
                chunk += data_in.read(CHUNK_SIZE)
                LN = len(chunk)/2
                bytecode = (int(chunk[b:b+2],16) for b in xrange(0,len(chunk),2))
            else:
                LN = len(chunk)
                bytecode = (ord(byte) for byte in chunk)
        except IOError:
            _log("ERROR: error reading input file! Aborting.", 1)
        ## BIN KEY SETTING ##
        # Fold the key down to at most LN bytes by XOR-ing the overflow back in.
        binKey = []
        for c in xrange(len(key)):
            if (len(binKey) <= LN):
                binKey.append(ord(key[c]))
            else:
                binKey[c % LN] ^= ord(key[c])
        ## ENCRYPTION PART ##
        coded = []
        for i in xrange(LN):
            try: coded.append(bytecode.next())
            except (ValueError, IndexError):
                _log("ERROR: The input data is not valid hex! Aborting.", 1)
            k = i%len(binKey)
            # XOR - if encoding
            if do_encrypt: coded[i] ^= binKey[k]
            # MISC: swap the bit pairs addressed by octKey; decryption walks
            # the same pairs in reverse order to undo the swaps.
            rng = xrange(0, len(octKey), 2) if do_encrypt else xrange(-1, -len(octKey)-1, -2)
            s = 1 if do_encrypt else -1
            for j in rng:
                # Swap the two bits only when they differ (XOR with the mask).
                if bool(coded[i] & 2**octKey[j]) ^ bool(coded[i] & 2**octKey[j+1*s]):
                    mask = 2**octKey[j] + 2**octKey[j+1*s]
                    coded[i] ^= mask
            # XOR - if decoding
            if not do_encrypt: coded[i] ^= binKey[k]
            STATUS['coded_bytes'] += 1
        if VERBOSE: _log("debug: {0} bytes encoded. {1} bytes encoded until now.".format(len(coded), STATUS['coded_bytes']))
        ## OUTPUT PART ##
        try:
            if hex_mode and do_encrypt:
                data_out.write(''.join(map("{0:02X}".format, coded)))
            else:
                data_out.write(''.join(map(chr,coded)))
            if VERBOSE: _log("debug: data successfully appended to output buffer.")
        except IOError:
            _log("ERROR: can't write to output file! Aborting.", 1)
    end_time = time()
    RUNNING = False
    if VERBOSE: _log("debug: completed! elapsed time: "+str(timedelta(seconds=time()-start_time)))
#####################################
def main(args_from_interactive=None):
    """CLI entry point: parse arguments, run the cipher, report timing.

    :param args_from_interactive: argument list supplied by the interactive
        shell, or None to parse sys.argv
    """
    global VERBOSE, QUIET, STATUS, RUNNING, ABORTED, PROC
    try:
        args = handle_args.main(args_from_interactive)
        VERBOSE = args.v
        QUIET = args.q
        if not QUIET:
            # Progress thread; started inside _encrypter() once RUNNING is set.
            PROC = threading.Thread(target=show_progress)
        if args.e:
            _encrypter(True, args.i, args.o, args.k, args.hex)
        elif args.d:
            _encrypter(False, args.i, args.o, args.k, args.hex)
        if not QUIET:
            msg_out.write(
                ("\r" if not VERBOSE else "") +
                ("MBencrypter:" if not INTER_MODE else ">>") +
                " PROGRESS: DONE! -- Execution time: " +
                str(timedelta(seconds=end_time-start_time)).split(".")[0] + "\n")
            msg_out.flush()
    except KeyboardInterrupt:
        RUNNING = False
        ABORTED = True
        # In interactive mode let the shell handle the interrupt itself.
        if INTER_MODE: raise
        else: _log("Manually Interrupted!", 0, "\n")

if __name__ == "__main__": main()
|
child_process_executor.py | '''Facilities for running arbitrary commands in child processes.'''
import multiprocessing
import os
import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.utils import get_multiprocessing_context
from dagster.utils.error import serializable_error_info_from_exc_info
class ChildProcessEvent:
    '''Base class for events sent from the child process back to the parent.'''

    pass
class ChildProcessStartEvent(namedtuple('ChildProcessStartEvent', 'pid'), ChildProcessEvent):
    '''Emitted as the first event when the child process begins executing.'''

    pass
class ChildProcessDoneEvent(namedtuple('ChildProcessDoneEvent', 'pid'), ChildProcessEvent):
    '''Emitted when the child's command finished without raising.'''

    pass
class ChildProcessSystemErrorEvent(
    namedtuple('ChildProcessSystemErrorEvent', 'pid error_info'), ChildProcessEvent
):
    '''Emitted when the child's command raised; carries serialized error info.'''

    pass
class ChildProcessCommand(six.with_metaclass(ABCMeta)):  # pylint: disable=no-init
    '''Inherit from this class in order to use this library.

    The object must be picklable; instantiate it and pass it to
    _execute_command_in_child_process.'''

    @abstractmethod
    def execute(self):
        ''' This method is invoked in the child process.

        Yields a sequence of events to be handled by
        _execute_command_in_child_process.'''
class ChildProcessCrashException(Exception):
    '''Thrown when the child process crashes (dies without a Done/Error event).'''
def _execute_command_in_child_process(queue, command):
    '''Wraps the execution of a ChildProcessCommand.

    Handles errors and communicates across a queue with the parent process.
    Protocol: StartEvent, then every event yielded by the command, then either
    DoneEvent (success) or SystemErrorEvent (any exception / interrupt).
    '''
    check.inst_param(command, 'command', ChildProcessCommand)

    pid = os.getpid()
    queue.put(ChildProcessStartEvent(pid=pid))
    try:
        for step_event in command.execute():
            queue.put(step_event)
        queue.put(ChildProcessDoneEvent(pid=pid))
    except (Exception, KeyboardInterrupt):  # pylint: disable=broad-except
        # Ship the error to the parent instead of crashing silently; include
        # KeyboardInterrupt so a forwarded interrupt is also reported.
        queue.put(
            ChildProcessSystemErrorEvent(
                pid=pid, error_info=serializable_error_info_from_exc_info(sys.exc_info())
            )
        )
    finally:
        # Flush the queue's background feeder thread before the child exits.
        queue.close()
# Polling interval used by _poll_for_event's blocking queue reads.
TICK = 20.0 * 1.0 / 1000.0
'''The minimum interval at which to check for child process liveness -- default 20ms.'''

# Returned by _poll_for_event when the child died and no events remain.
PROCESS_DEAD_AND_QUEUE_EMPTY = 'PROCESS_DEAD_AND_QUEUE_EMPTY'
'''Sentinel value.'''
def _poll_for_event(process, queue):
    '''Fetch the next child event, waiting at most TICK seconds.

    Returns the event, a KeyboardInterrupt instance (so the caller can forward
    it), None when the child is alive but quiet, or the
    PROCESS_DEAD_AND_QUEUE_EMPTY sentinel once the child has died and the
    queue is fully drained.'''
    try:
        return queue.get(block=True, timeout=TICK)
    except KeyboardInterrupt as interrupt:
        # Surface the interrupt as a value rather than raising through.
        return interrupt
    except multiprocessing.queues.Empty:
        if process.is_alive():
            return None
        # The child may have emitted a final event between our last get and
        # its death, so drain once more before declaring the queue exhausted.
        try:
            return queue.get(block=False)
        except multiprocessing.queues.Empty:
            return PROCESS_DEAD_AND_QUEUE_EMPTY
def execute_child_process_command(command):
    '''Execute a ChildProcessCommand in a new process.

    This function starts a new process whose execution target is a ChildProcessCommand wrapped by
    _execute_command_in_child_process; polls the queue for events yielded by the child process
    until the process dies and the queue is empty.

    This function yields a complex set of objects to enable having multiple child process
    executions in flight:
        * None - nothing has happened, yielded to enable cooperative multitasking other iterators
        * ChildProcessEvent - Family of objects that communicates state changes in the child process
        * KeyboardInterrupt - Yielded in the case that an interrupt was received while
            polling the child process. Yielded instead of raised to allow forwarding of the
            interrupt to the child and completion of the iterator for this child and
            any others that may be executing
        * The actual values yielded by the child process command

    Args:
        command (ChildProcessCommand): The command to execute in the child process.

    Warning: if the child process is in an infinite loop, this will
    also infinitely loop.
    '''
    check.inst_param(command, 'command', ChildProcessCommand)

    multiprocessing_context = get_multiprocessing_context()
    queue = multiprocessing_context.Queue()
    process = multiprocessing_context.Process(
        target=_execute_command_in_child_process, args=(queue, command)
    )
    process.start()
    completed_properly = False
    while not completed_properly:
        event = _poll_for_event(process, queue)
        if event == PROCESS_DEAD_AND_QUEUE_EMPTY:
            # Child died without sending Done/Error — treated as a crash below.
            break
        yield event
        if isinstance(event, (ChildProcessDoneEvent, ChildProcessSystemErrorEvent)):
            completed_properly = True
    if not completed_properly:
        # TODO Gather up stderr and the process exit code
        raise ChildProcessCrashException()
    process.join()
|
ssh.py | import logging
import os
import re
import shutil
import string
import sys
import tarfile
import tempfile
import threading
import time
from .. import term
from ..context import context
from ..log import getLogger
from ..timeout import Timeout
from ..util import hashes
from ..util import misc
from ..util import safeeval
from .process import process
from .sock import sock
# Kill the warning line:
# No handlers could be found for logger "paramiko.transport"
paramiko_log = logging.getLogger("paramiko.transport")
# Fixed: `file(...)` is a Python-2-only builtin; open() behaves identically
# here and also works on Python 3. (logging.NullHandler would be the fully
# idiomatic silencer, but keeping the stream handler preserves behavior.)
h = logging.StreamHandler(open('/dev/null','w+'))
h.setFormatter(logging.Formatter())
paramiko_log.addHandler(h)
log = getLogger(__name__)
class ssh_channel(sock):
    """A single SSH session channel (one command or shell) on top of a
    paramiko transport, exposed with the tube/sock interface."""

    #: Parent :class:`ssh` object
    parent = None

    #: Remote host
    host = None

    #: Return code, or ``None`` if the process has not returned
    #: Use :meth:`poll` to check.
    returncode = None

    #: ``True`` if a tty was allocated for this channel
    tty = False

    #: Environment specified for the remote process, or ``None``
    #: if the default environment was used
    env = None

    #: Command specified for the constructor
    process = None

    #: Working directory
    cwd = None

    #: PID of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    pid = None

    #: Executable of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    exe = None

    #: Arguments passed to the process
    #: Only valid when instantiated through :meth:`ssh.process`
    argv = None
def __init__(self, parent, process = None, tty = False, wd = None, env = None, timeout = Timeout.default):
super(ssh_channel, self).__init__(timeout)
# keep the parent from being garbage collected in some cases
self.parent = parent
self.returncode = None
self.host = parent.host
self.tty = tty
self.env = env
self.process = process
self.cwd = wd
env = env or {}
msg = 'Opening new channel: %r' % ((process,) or 'shell')
if isinstance(process, (list, tuple)):
process = ' '.join(misc.sh_string(s) for s in process)
if process and wd:
process = "cd %s >/dev/null 2>&1; %s" % (misc.sh_string(wd), process)
if process and env:
for name, value in env.items():
if not re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', name):
log.error('run(): Invalid environment key $r' % name)
process = '%s=%s %s' % (name, misc.sh_string(value), process)
if process and tty:
process = 'stty raw -ctlecho -echo; ' + process
# If this object is enabled for DEBUG-level logging, don't hide
# anything about the command that's actually executed.
if process and log.isEnabledFor(logging.DEBUG):
msg = 'Opening new channel: %r' % ((process,) or 'shell')
with log.waitfor(msg) as h:
import paramiko
try:
self.sock = parent.transport.open_session()
except paramiko.ChannelException as e:
if e.args == (1, 'Administratively prohibited'):
log.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
raise e
if self.tty:
self.sock.get_pty('xterm', term.width, term.height)
def resizer():
if self.sock:
self.sock.resize_pty(term.width, term.height)
self.resizer = resizer
term.term.on_winch.append(self.resizer)
else:
self.resizer = None
# Put stderr on stdout. This might not always be desirable,
# but our API does not support multiple streams
self.sock.set_combine_stderr(True)
self.settimeout(self.timeout)
if process:
self.sock.exec_command(process)
else:
self.sock.invoke_shell()
h.success()
    def kill(self):
        """kill()

        Kills the process.

        There is no remote signal delivery here: closing the channel is what
        terminates the remote process.
        """
        self.close()
    def recvall(self, timeout = sock.forever):
        """Receive everything until EOF, then wait for the exit status.

        NOTE(review): the `timeout` parameter is immediately overwritten from
        self.timeout below, so the argument value is effectively ignored —
        confirm whether that is intended.
        """
        # We subclass tubes.sock which sets self.sock to None.
        #
        # However, we need to wait for the return value to propagate,
        # which may not happen by the time .close() is called by tube.recvall()
        tmp_sock = self.sock

        timeout = self.maximum if self.timeout is self.forever else self.timeout
        data = super(ssh_channel, self).recvall(timeout)

        # Restore self.sock to be able to call wait()
        self.sock = tmp_sock
        self.wait()

        # Again set self.sock to None
        self.sock = None

        return data
    def wait(self):
        """Block until the remote process exits; returns its exit code."""
        return self.poll(block=True)
def poll(self, block=False):
"""poll() -> int
Poll the exit code of the process. Will return None, if the
process has not yet finished and the exit code otherwise.
"""
if self.returncode == None and hasattr(self, 'sock') and self.sock \
and (block or self.sock.exit_status_ready()):
while not self.sock.status_event.is_set():
self.sock.status_event.wait(0.05)
self.returncode = self.sock.recv_exit_status()
return self.returncode
def can_recv_raw(self, timeout):
    """Return True as soon as data is available, polling the channel for
    at most `timeout` seconds."""
    deadline = time.time() + timeout

    while time.time() < deadline:
        if self.sock.recv_ready():
            return True
        time.sleep(0.05)

    return False
def interactive(self, prompt = term.text.bold_red('$') + ' '):
    """interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')

    If not in TTY-mode, this does exactly the same as
    meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
    it does mostly the same.

    An SSH connection in TTY-mode will typically supply its own prompt,
    thus the prompt argument is ignored in this case.

    We also have a few SSH-specific hacks that will ideally be removed
    once the :mod:`pwnlib.term` is more mature.
    """
    # If we are only executing a regular old shell, we need to handle
    # control codes (specifically Ctrl+C).
    #
    # Otherwise, we can just punt to the default implementation of interactive()
    if self.process is not None:
        return super(ssh_channel, self).interactive(prompt)

    log.info('Switching to interactive mode')

    # We would like a cursor, please!
    term.term.show_cursor()

    event = threading.Event()
    def recv_thread(event):
        # Pump remote output to stdout until EOF or until the main loop
        # signals shutdown via `event`.
        while not event.is_set():
            try:
                cur = self.recv(timeout = 0.05)
                if cur == None:
                    continue
                elif cur == '\a':
                    # Ugly hack until term unstands bell characters
                    continue
                sys.stdout.write(cur)
                sys.stdout.flush()
            except EOFError:
                log.info('Got EOF while reading in interactive')
                event.set()
                break

    # Daemonized so a stuck reader cannot prevent interpreter exit.
    t = context.Thread(target = recv_thread, args = (event,))
    t.daemon = True
    t.start()

    while not event.is_set():
        if term.term_mode:
            try:
                data = term.key.getraw(0.1)
            except KeyboardInterrupt:
                data = [3] # This is ctrl-c
            except IOError:
                if not event.is_set():
                    raise
        else:
            data = sys.stdin.read(1)
            if not data:
                event.set()
            else:
                data = [ord(data)]

        if data:
            try:
                # Forward the raw keystrokes to the remote side.
                self.send(''.join(chr(c) for c in data))
            except EOFError:
                event.set()
                log.info('Got EOF while sending in interactive')

    # Join in small increments so Ctrl+C stays responsive.
    while t.is_alive():
        t.join(timeout = 0.1)

    # Restore
    term.term.hide_cursor()
def close(self):
    """Close the channel, detaching any terminal-resize hook first."""
    # Harvest the exit status while the channel is still usable.
    self.poll()

    # The resize callback may have been registered more than once;
    # strip every copy before tearing the channel down.
    while self.resizer in term.term.on_winch:
        term.term.on_winch.remove(self.resizer)

    super(ssh_channel, self).close()
def spawn_process(self, *args, **kwargs):
    """Unsupported operation on an SSH channel."""
    log.error("Cannot use spawn_process on an SSH channel.")
def _close_msg(self):
    # Human-readable close notification used by the tube machinery.
    message = 'Closed SSH channel with %s' % self.host
    log.info(message)
class ssh_connecter(sock):
    """Tube over a ``direct-tcpip`` channel -- the moral equivalent of
    ``ssh -L``: connects from the SSH server out to ``host:port`` and
    exposes the result as a regular :class:`sock` tube."""

    def __init__(self, parent, host, port, timeout = Timeout.default):
        super(ssh_connecter, self).__init__(timeout)

        # keep the parent from being garbage collected in some cases
        self.parent = parent

        # `host` is the host of the SSH server itself; rhost/rport is the
        # final connection target reached from the server.
        self.host = parent.host
        self.rhost = host
        self.rport = port

        msg = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
        with log.waitfor(msg) as h:
            try:
                self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
            except Exception as e:
                # NOTE(review): `e.message` is Python-2-only and deprecated;
                # confirm whether str(e) should be used instead.
                self.exception(e.message)

            sockname = self.sock.get_transport().sock.getsockname()
            self.lhost = sockname[0]
            self.lport = sockname[1]

            h.success()

    def spawn_process(self, *args, **kwargs):
        # Not meaningful for a forwarded TCP connection.
        log.error("Cannot use spawn_process on an SSH channel.""")

    def _close_msg(self):
        log.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
    """Tube for a remote port-forward -- the moral equivalent of ``ssh -R``:
    asks the remote sshd to listen on ``bind_address:port`` and accepts a
    single inbound connection on a background thread."""

    def __init__(self, parent, bind_address, port, timeout = Timeout.default):
        super(ssh_listener, self).__init__(timeout)

        # keep the parent from being garbage collected in some cases
        self.parent = parent

        self.host = parent.host

        try:
            # sshd picks a free port when `port` is 0; record the real one.
            self.port = parent.transport.request_port_forward(bind_address, port)

        except:
            # NOTE(review): bare `except` also swallows KeyboardInterrupt
            # and SystemExit before re-raising.
            log.error('Failed create a port forwarding')
            raise

        def accepter():
            msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
            with log.waitfor(msg) as h:
                try:
                    # Only a single connection is accepted; the forwarding
                    # is cancelled as soon as it arrives.
                    self.sock = parent.transport.accept()
                    parent.transport.cancel_port_forward(bind_address, self.port)
                except:
                    self.sock = None
                    self.exception('Failed to get a connection')

                self.rhost, self.rport = self.sock.origin_addr
                h.success('Got connection from %s:%d' % (self.rhost, self.rport))

        self._accepter = context.Thread(target = accepter)
        self._accepter.daemon = True
        self._accepter.start()

    def _close_msg(self):
        log.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))

    def spawn_process(self, *args, **kwargs):
        # Not meaningful for a forwarded TCP connection.
        log.error("Cannot use spawn_process on an SSH channel.""")

    def wait_for_connection(self):
        """Blocks until a connection has been established."""
        # Touching `self.sock` routes through __getattr__ below, which
        # joins the accepter thread before the attribute exists.
        _ = self.sock
        return self

    def __getattr__(self, key):
        # Lazily block on 'sock' until the accepter thread has finished.
        # Once the thread stores the instance attribute, normal lookup
        # wins and this hook is no longer consulted for it.
        if key == 'sock':
            while self._accepter.is_alive():
                self._accepter.join(timeout = 0.1)

            return self.sock
        else:
            # NOTE(review): getattr on a super() proxy like this is
            # unusual; confirm the intended fallback behavior.
            return getattr(super(ssh_listener, self), key)
class ssh(Timeout):
    """SSH connection wrapper built on paramiko.

    Provides remote process channels (:meth:`run`/:meth:`system`), port
    forwarding (:meth:`connect_remote`, :meth:`listen_remote`) and file
    transfer helpers with an on-disk download cache.
    """

    #: Remote host name (``str``)
    host = None

    #: Remote port (``int``)
    port = None

    #: Working directory (``str``)
    cwd = None

    #: Enable caching of SSH downloads (``bool``)
    cache = True

    #: Paramiko SSHClient which backs this object
    client = None

    #: Paramiko SFTPClient object which is used for file transfers.
    #: Set to ``None`` to disable ``sftp``.
    sftp = None

    #: PID of the remote ``sshd`` process servicing this connection.
    pid = None
def __init__(self, user, host, port = 22, password = None, key = None,
             keyfile = None, proxy_command = None, proxy_sock = None,
             timeout = Timeout.default, cache = True):
    """Creates a new ssh connection.

    Arguments:
        user(str): The username to log in with
        host(str): The hostname to connect to
        port(int): The port to connect to
        password(str): Try to authenticate using this password
        key(str): Try to authenticate using this private key. The string should be the actual private key.
        keyfile(str): Try to authenticate using this private key. The string should be a filename.
        proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
        proxy_sock(str): Use this socket instead of connecting to the host.
        timeout: Timeout, in seconds
        cache: Cache downloaded files (by hash/size/timestamp)

    NOTE: The proxy_command and proxy_sock arguments is only available if a
    fairly new version of paramiko is used."""
    super(ssh, self).__init__(timeout)

    self.host = host
    self.port = port
    self.user = user
    self.password = password
    self.key = key
    self.keyfile = keyfile
    # Downloads are cached on disk, keyed by content fingerprint.
    self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
    self.cwd = '.'
    self.cache = cache

    misc.mkdir_p(self._cachedir)

    # Deferred import: paramiko is only needed once a connection is made.
    import paramiko

    # Make a basic attempt to parse the ssh_config file
    try:
        config_file = os.path.expanduser('~/.ssh/config')
        if os.path.exists(config_file):
            ssh_config = paramiko.SSHConfig()
            # NOTE(review): `file()` is Python-2-only.
            ssh_config.parse(file(config_file))
            host_config = ssh_config.lookup(host)
            # Honor HostName and IdentityFile overrides from the config.
            if 'hostname' in host_config:
                self.host = host = host_config['hostname']
            if not keyfile and 'identityfile' in host_config:
                keyfile = host_config['identityfile'][0]
    except Exception as e:
        # Config parsing is best-effort; never fail the connection for it.
        log.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)

    keyfiles = [os.path.expanduser(keyfile)] if keyfile else []

    msg = 'Connecting to %s on port %d' % (host, port)
    with log.waitfor(msg) as h:
        self.client = paramiko.SSHClient()
        # Unknown host keys are accepted automatically.
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        known_hosts = os.path.expanduser('~/.ssh/known_hosts')
        if os.path.exists(known_hosts):
            self.client.load_host_keys(known_hosts)

        has_proxy = (proxy_sock or proxy_command) and True
        if has_proxy:
            if 'ProxyCommand' not in dir(paramiko):
                log.error('This version of paramiko does not support proxies.')

            if proxy_sock and proxy_command:
                log.error('Cannot have both a proxy command and a proxy sock')

            if proxy_command:
                proxy_sock = paramiko.ProxyCommand(proxy_command)
            self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True, sock = proxy_sock)
        else:
            self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True)

        self.transport = self.client.get_transport()
        h.success()

    # SFTP is optional; the transfer helpers fall back to shell commands
    # when it is unavailable.
    try:
        self.sftp = self.transport.open_sftp_client()
    except Exception:
        self.sftp = None

    # Record the PID of the sshd servicing us ($PPID of a spawned shell);
    # best-effort only.
    with context.local(log_level='error'):
        try:
            self.pid = int(self.system('echo $PPID').recv(timeout=1))
        except Exception:
            self.pid = None
def __enter__(self, *a):
    """Enter a ``with`` block; the connection itself is the resource."""
    return self

def __exit__(self, *a, **kw):
    """Leave the ``with`` block, closing the connection."""
    self.close()
def shell(self, shell = None, tty = True, timeout = Timeout.default):
    """shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel

    Open a new channel with a shell inside.

    Arguments:
        shell(str): Path to the shell program to run.
            If ``None``, uses the default shell for the logged in user.
        tty(bool): If ``True``, then a TTY is requested on the remote server.
        timeout: Timeout for the resulting channel.

    Returns:
        Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> sh = s.shell('/bin/sh')
        >>> sh.sendline('echo Hello; exit')
        >>> print 'Hello' in sh.recvall()
        True
    """
    # A shell is just `run` with a possibly-None command (login shell).
    return self.run(shell, tty, timeout = timeout)
def process(self, argv=None, executable=None, tty = True, cwd = None, env = None, timeout = Timeout.default, run = True,
            stdin=0, stdout=1, stderr=2):
    r"""
    Executes a process on the remote server, in the same fashion
    as pwnlib.tubes.process.process.

    To achieve this, a Python script is created to call ``os.execve``
    with the appropriate arguments.

    As an added bonus, the ``ssh_channel`` object returned has a
    ``pid`` property for the process pid.

    Arguments:
        argv(list):
            List of arguments to pass into the process
        executable(str):
            Path to the executable to run.
            If ``None``, ``argv[0]`` is used.
        tty(bool):
            Request a `tty` from the server.  This usually fixes buffering problems
            by causing `libc` to write data immediately rather than buffering it.
            However, this disables interpretation of control codes (e.g. Ctrl+C)
            and breaks `.shutdown`.
        cwd(str):
            Working directory.  If ``None``, uses the working directory specified
            on :attr:`cwd` or set via :meth:`set_working_directory`.
        env(dict):
            Environment variables to set in the child.  If ``None``, inherits the
            default environment.
        timeout(int):
            Timeout to set on the `tube` created to interact with the process.
        run(bool):
            Set to ``True`` to run the program (default).
            If ``False``, returns the path to an executable Python script on the
            remote server which, when executed, will do it.
        stdin(int, str):
            If an integer, replace stdin with the numbered file descriptor.
            If a string, a open a file with the specified path and replace
            stdin with its file descriptor.  May also be one of ``sys.stdin``,
            ``sys.stdout``, ``sys.stderr``.  If ``None``, the file descriptor is closed.
        stdout(int, str):
            See ``stdin``.
        stderr(int, str):
            See ``stdin``.

    Returns:
        A new SSH channel, or a path to a script if ``run=False``.

    Notes:
        Requires Python on the remote server.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> sh = s.process('sh', env={'PS1':''})
        >>> sh.sendline('echo Hello; exit')
        >>> sh.recvall()
        'Hello\n'
        >>> s.process(['/bin/echo', '\xff']).recvall()
        '\xff\n'
        >>> s.process(['readlink', '/proc/self/exe']).recvall()
        '/bin/readlink\n'
        >>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
        '/bin/readlink\n'
        >>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
        'LOLOLOL\x00/proc/self/cmdline\x00'
        >>> sh = s.process(executable='/bin/sh')
        >>> sh.pid in pidof('sh')
        True
        >>> s.process(['pwd'], cwd='/tmp').recvall()
        '/tmp\n'
        >>> p = s.process(['python','-c','import os; print os.read(2, 1024)'], stderr=0)
        >>> p.send('hello')
        >>> p.recv()
        'hello\n'
        >>> s.process(['/bin/echo', 'hello']).recvall()
        'hello\n'
        >>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
        ''
    """
    if not argv and not executable:
        log.error("Must specify argv or executable")

    argv = argv or []

    if isinstance(argv, (str, unicode)):
        argv = [argv]

    if not isinstance(argv, (list, tuple)):
        log.error('argv must be a list or tuple')

    # Python doesn't like when an arg in argv contains '\x00'
    # -> execve() arg 2 must contain only strings
    for i, arg in enumerate(argv):
        if '\x00' in arg[:-1]:
            log.error('Inappropriate nulls in argv[%i]: %r' % (i, arg))
        argv[i] = arg.rstrip('\x00')

    executable = executable or argv[0]
    cwd = cwd or self.cwd or '.'

    # Validate, since failures on the remote side will suck.
    if not isinstance(executable, str):
        log.error("executable / argv[0] must be a string: %r" % executable)
    if not isinstance(argv, (list, tuple)):
        log.error("argv must be a list or tuple: %r" % argv)
    if env is not None and not isinstance(env, dict):
        # BUG FIX: the `% env` was previously applied to the *return value*
        # of log.error() instead of the format string.
        log.error("env must be a dict: %r" % env)
    if not all(isinstance(s, str) for s in argv):
        log.error("argv must only contain strings: %r" % argv)

    # Allow passing in sys.stdin/stdout/stderr objects
    stdin = {sys.stdin: 0, sys.stdout: 1, sys.stderr: 2}.get(stdin, stdin)
    stdout = {sys.stdin: 0, sys.stdout: 1, sys.stderr: 2}.get(stdout, stdout)
    stderr = {sys.stdin: 0, sys.stdout: 1, sys.stderr: 2}.get(stderr, stderr)

    # The script below is executed on the *remote* host.  It prints '1'
    # and its own PID on success (when invoked with a trailing 'check'
    # argument), rewires fds 0-2 per the stdin/stdout/stderr specs, and
    # finally execve()s the target.
    script = r"""
#!/usr/bin/env python
import os, sys
exe = %r
argv = %r
env = %r

os.chdir(%r)

if env is None:
    env = os.environ

def is_exe(path):
    return os.path.isfile(path) and os.access(path, os.X_OK)

PATH = os.environ['PATH'].split(os.pathsep)

if os.path.sep not in exe and not is_exe(exe):
    for path in PATH:
        test_path = os.path.join(path, exe)
        if is_exe(test_path):
            exe = test_path
            break

if not is_exe(exe):
    sys.stderr.write('0\n')
    sys.stderr.write("{} is not executable or does not exist in {}".format(exe,PATH))
    sys.exit(-1)

if sys.argv[-1] == 'check':
    sys.stdout.write("1\n")
    sys.stdout.write(str(os.getpid()) + "\n")
    sys.stdout.flush()

for fd, newfd in {0: %r, 1: %r, 2:%r}.items():
    if newfd is None:
        os.close(fd)
    elif isinstance(newfd, str):
        os.close(fd)
        os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
    elif isinstance(newfd, int) and newfd != fd:
        os.dup2(newfd, fd)
        if newfd > 2:
            os.close(newfd)

os.execve(exe, argv, env)
""" % (executable, argv, env, cwd, stdin, stdout, stderr)
    # BUG FIXES in the script above:
    #  * `close(fd)` was missing the `os.` prefix (NameError on the remote).
    #  * `os.dup2(fd, newfd)` had its arguments reversed; dup2(newfd, fd)
    #    duplicates the replacement descriptor onto the standard fd, which
    #    is what the `stderr=0` doctest above requires.

    script = script.lstrip()

    log.debug("Created execve script:\n" + script)

    if not run:
        # Just drop the script on the remote host and hand back its path.
        with context.local(log_level='error'):
            tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
            self.chmod('+x', tmpfile)

        log.info("Uploading execve script to %r" % tmpfile)
        self.upload_data(script, tmpfile)
        return tmpfile

    execve_repr = "execve(%r, %s, %s)" % (executable, argv, env or 'os.environ')

    with log.progress('Opening new channel: %s' % execve_repr) as h:
        script = misc.sh_string(script)

        with context.local(log_level='error'):
            # 'echo 2' only runs when no python interpreter is available.
            python = self.run('test -x "$(which python 2>&1)" && exec python -c %s check; echo 2' % script)
        result = safeeval.const(python.recvline())

        # If an error occurred, try to grab as much output
        # as we can.
        if result != 1:
            error_message = python.recvrepeat(timeout=1)

        if result == 0:
            log.error("%r does not exist or is not executable" % executable)
        elif result == 2:
            log.error("python is not installed on the remote system %r" % self.host)
        elif result != 1:
            h.failure("something bad happened:\n%s" % error_message)

        # Second line emitted by the script is the child's PID.
        python.pid = safeeval.const(python.recvline())
        python.argv = argv
        python.exe = executable

    return python
def system(self, process, tty = True, wd = None, env = None, timeout = Timeout.default):
    r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default) -> ssh_channel

    Open a new channel with a specific process inside. If `tty` is True,
    then a TTY is requested on the remote server.

    Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> py = s.run('python -i')
        >>> _ = py.recvuntil('>>> ')
        >>> py.sendline('print 2+2')
        >>> py.sendline('exit')
        >>> print repr(py.recvline())
        '4\n'
    """
    # Default to the session-wide working directory.
    if wd is None:
        wd = self.cwd

    return ssh_channel(self, process, tty, wd, env, timeout)

#: Backward compatibility.  Use :meth:`system`
run = system
def run_to_end(self, process, tty = False, wd = None, env = None):
    r"""run_to_end(process, tty = False, wd = None, env = None) -> str

    Run a command on the remote server and return a tuple with
    (data, exit_status). If `tty` is True, then the command is run inside
    a TTY on the remote server.

    Arguments:
        process(str): Command to run.
        tty(bool): Request a TTY on the remote server.
        wd(str): Working directory for the command.
        env(dict): Environment variables for the command.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> print s.run_to_end('echo Hello; exit 17')
        ('Hello\n', 17)
    """
    with context.local(log_level = 'ERROR'):
        # BUG FIX: `env` was accepted but never forwarded to self.run().
        c = self.run(process, tty, wd = wd, env = env, timeout = Timeout.default)
        data = c.recvall()
        retcode = c.wait()
        c.close()

    return data, retcode
def connect_remote(self, host, port, timeout = Timeout.default):
    r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter

    Connects to a host through an SSH connection. This is equivalent to
    using the ``-L`` flag on ``ssh``.

    Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.

    Examples:
        >>> from pwn import *
        >>> l = listen()
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> a = s.connect_remote(s.host, l.lport)
        >>> b = l.wait_for_connection()
        >>> a.sendline('Hello')
        >>> print repr(b.recvline())
        'Hello\n'
    """
    # All the work happens in ssh_connecter.__init__.
    return ssh_connecter(self, host, port, timeout)
def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
    r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter

    Listens remotely through an SSH connection. This is equivalent to
    using the ``-R`` flag on ``ssh``.

    Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.

    Examples:
        >>> from pwn import *
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> l = s.listen_remote()
        >>> a = remote(s.host, l.port)
        >>> b = l.wait_for_connection()
        >>> a.sendline('Hello')
        >>> print repr(b.recvline())
        'Hello\n'
    """
    # port=0 lets the remote sshd pick a free port; see ssh_listener.port.
    return ssh_listener(self, bind_address, port, timeout)
def __getitem__(self, attr):
    """Permits indexed access to run commands over SSH

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> print s['echo hello']
        hello
    """
    # Delegate to the dynamic-command machinery and invoke immediately.
    command_runner = self.__getattr__(attr)
    return command_runner()
def __call__(self, attr):
    """Permits function-style access to run commands over SSH

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> print repr(s('echo hello'))
        'hello'
    """
    # Same machinery as attribute access; run the command right away.
    command_runner = self.__getattr__(attr)
    return command_runner()
def __getattr__(self, attr):
    """Permits member access to run commands over SSH

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> s.echo('hello')
        'hello'
        >>> s.whoami()
        'travis'
        >>> s.echo(['huh','yay','args'])
        'huh yay args'
    """
    bad_attrs = [
        'trait_names',          # ipython tab-complete
    ]

    # Do not intercept real attributes, private names, or IPython's
    # introspection probes -- otherwise this hook would recurse or
    # shadow genuine lookups.
    if attr in self.__dict__ \
    or attr in bad_attrs \
    or attr.startswith('_'):
        raise AttributeError

    def runner(*args):
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            # NOTE(review): this passes a *list* to self.run(); given the
            # doctest above expects 'huh yay args', confirm whether
            # ' '.join([attr] + list(args[0])) was the intent.
            command = [attr] + args[0]
        else:
            command = ' '.join((attr,) + args)

        return self.run(command).recvall().strip()

    return runner
def connected(self):
    """Returns True if we are connected.

    Example:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> s.connected()
        True
        >>> s.close()
        >>> s.connected()
        False
    """
    # No client at all (e.g. after close()) means not connected.
    if not self.client:
        return False
    return bool(self.client.get_transport().is_active())
def close(self):
    """Close the connection."""
    # Idempotent: a second close() is a no-op.
    if not self.client:
        return

    self.client.close()
    self.client = None
    log.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
    """Return a dictionary of the libraries used by a remote file."""
    # Trace the dynamic loader remotely; `ulimit -s unlimited` is
    # presumably a workaround for loader stack limits -- confirm.
    # All stderr output is discarded.
    cmd = '(ulimit -s unlimited; ldd %s > /dev/null && (LD_TRACE_LOADED_OBJECTS=1 %s || ldd %s)) 2>/dev/null'
    arg = misc.sh_string(remote)
    data, status = self.run_to_end(cmd % (arg, arg, arg))
    if status != 0:
        log.error('Unable to find libraries for %r' % remote)
        return {}

    return misc.parse_ldd_output(data)
def _get_fingerprint(self, remote):
    """Return the SHA-256 hex digest of a remote file, or None on failure."""
    # Whichever sha256 tool exists on the remote host wins.
    arg = misc.sh_string(remote)
    cmd = '(openssl sha256 || sha256 || sha256sum) 2>/dev/null < %s' % (arg)
    data, status = self.run_to_end(cmd)

    if status != 0:
        return None

    # Normalize tool-specific output decorations:
    #   openssl:   "(stdin)= <hex>"
    #   sha256sum: "<hex>  -"
    for junk in ('(stdin)= ', '-'):
        data = data.replace(junk, '')

    return data.strip()
def _get_cachefile(self, fingerprint):
    """Map a SHA-256 fingerprint to its path inside the download cache."""
    return os.path.join(self._cachedir, fingerprint)
def _verify_local_fingerprint(self, fingerprint):
    """Return True iff a cache entry exists and matches `fingerprint`.

    A corrupt cache entry is deleted rather than trusted.
    """
    # Reject anything that is not a 64-digit hex string up front.
    looks_valid = set(fingerprint).issubset(string.hexdigits) \
                  and len(fingerprint) == 64
    if not looks_valid:
        log.error('Invalid fingerprint %r' % fingerprint)
        return False

    local = self._get_cachefile(fingerprint)
    if not os.path.isfile(local):
        return False

    if hashes.sha256filehex(local) == fingerprint:
        return True

    # Stale or corrupt entry: evict it.
    os.unlink(local)
    return False
def _download_raw(self, remote, local, h):
    """Download `remote` to `local` without caching, reporting progress on `h`."""
    def update(has, total):
        h.status("%s/%s" % (misc.size(has), misc.size(total)))

    # Fast path: SFTP reports progress natively via the callback.
    if self.sftp:
        self.sftp.get(remote, local, update)
        return

    # Fallback: size the file with `wc`, then stream it through `cat`.
    total, exitcode = self.run_to_end('wc -c <' + misc.sh_string(remote))

    if exitcode != 0:
        h.failure("%r does not exist or is not accessible" % remote)
        return

    total = int(total)

    with context.local(log_level = 'ERROR'):
        c = self.run('cat ' + misc.sh_string(remote))
    data = ''

    while True:
        try:
            data += c.recv()
        except EOFError:
            break
        update(len(data), total)

    result = c.wait()
    if result != 0:
        h.failure('Could not download file %r (%r)' % (remote, result))
        return

    with open(local, 'w') as fd:
        fd.write(data)
def _download_to_cache(self, remote, p):
    """Download `remote` into the local cache and return the cached path."""
    with context.local(log_level='error'):
        # Resolve symlinks so the cache key reflects the real file.
        remote = self.readlink('-f',remote)
    fingerprint = self._get_fingerprint(remote)

    # Without a fingerprint the file cannot be content-keyed; fall back
    # to a timestamped, uncached download.
    if fingerprint is None:
        local = os.path.normpath(remote)
        local = os.path.basename(local)
        local += time.strftime('-%Y-%m-%d-%H:%M:%S')
        local = os.path.join(self._cachedir, local)

        self._download_raw(remote, local, p)
        return local

    local = self._get_cachefile(fingerprint)

    if self.cache and self._verify_local_fingerprint(fingerprint):
        p.success('Found %r in ssh cache' % remote)
    else:
        self._download_raw(remote, local, p)

        # Verify the bytes we just fetched actually match the fingerprint.
        if not self._verify_local_fingerprint(fingerprint):
            p.failure('Could not download file %r' % remote)

    return local
def download_data(self, remote):
    """Downloads a file from the remote server and returns it as a string.

    Arguments:
        remote(str): The remote filename to download.

    Examples:
        >>> with file('/tmp/bar','w+') as f:
        ...     f.write('Hello, world')
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass',
        ...         cache=False)
        >>> s.download_data('/tmp/bar')
        'Hello, world'
        >>> s.sftp = False
        >>> s.download_data('/tmp/bar')
        'Hello, world'
    """
    with log.progress('Downloading %r' % remote) as p:
        # The cache does the heavy lifting; just read the cached copy back.
        with open(self._download_to_cache(remote, p)) as fd:
            return fd.read()
def download_file(self, remote, local = None):
    """Downloads a file from the remote server.

    The file is cached in /tmp/binjitsu-ssh-cache using a hash of the file, so
    calling the function twice has little overhead.

    Arguments:
        remote(str): The remote filename to download
        local(str): The local filename to save it to. Default is to infer it from the remote filename.
    """
    if not local:
        local = os.path.basename(os.path.normpath(remote))

    # A bare filename is interpreted relative to the remote cwd.
    if self.cwd and os.path.basename(remote) == remote:
        remote = os.path.join(self.cwd, remote)

    with log.progress('Downloading %r to %r' % (remote, local)) as p:
        local_tmp = self._download_to_cache(remote, p)

    # Check to see if an identical copy of the file already exists
    if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
        shutil.copy2(local_tmp, local)
def download_dir(self, remote=None, local=None):
    """Recursively downloads a directory from the remote server

    Arguments:
        remote: Remote directory
        local: Local directory
    """
    remote = remote or self.cwd or '.'

    if self.sftp:
        remote = str(self.sftp.normalize(remote))
    else:
        with context.local(log_level='error'):
            # NOTE(review): this binds the *channel object*, not the
            # command's output -- confirm whether .recvall().strip()
            # was intended here.
            remote = self.system('readlink -f %s' % remote)

    dirname = os.path.dirname(remote)
    basename = os.path.basename(remote)

    local = local or '.'
    local = os.path.expanduser(local)

    log.info("Downloading %r to %r" % (basename,local))

    with context.local(log_level='error'):
        # Tar the directory remotely, fetch the tarball, extract locally.
        remote_tar = self.mktemp()
        tar = self.system('tar -C %s -czf %s %s' % (dirname, remote_tar, basename))

        if 0 != tar.wait():
            log.error("Could not create remote tar")

        local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
        self.download_file(remote_tar, local_tar.name)

        tar = tarfile.open(local_tar.name)
        tar.extractall(local)
def upload_data(self, data, remote):
    """Uploads some data into a file on the remote server.

    Arguments:
        data(str): The data to upload.
        remote(str): The filename to upload it to.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> s.upload_data('Hello, world', '/tmp/upload_foo')
        >>> print file('/tmp/upload_foo').read()
        Hello, world
        >>> s.sftp = False
        >>> s.upload_data('Hello, world', '/tmp/upload_bar')
        >>> print file('/tmp/upload_bar').read()
        Hello, world
    """
    # If a relative path was provided, prepend the cwd
    if os.path.normpath(remote) == os.path.basename(remote):
        remote = os.path.join(self.cwd or '.', remote)

    # Fast path: stage the data in a local temp file and push it via SFTP.
    if self.sftp:
        with tempfile.NamedTemporaryFile() as f:
            f.write(data)
            f.flush()
            self.sftp.put(f.name, remote)
            return

    # Fallback: pipe the data through `cat` on the remote side.
    with context.local(log_level = 'ERROR'):
        s = self.run('cat>' + misc.sh_string(remote), tty=False)
        s.send(data)
        s.shutdown('send')
        data = s.recvall()
        result = s.wait()
        if result != 0:
            log.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
    """Uploads a file to the remote server.  Returns the remote filename.

    Arguments:
        filename(str): The local filename to upload
        remote(str): The remote filename to save it to.  Default is to
            infer it from the local filename.
    """
    # Idiom fix: identity comparison with None.
    if remote is None:
        # Infer the remote name from the local basename, relative to cwd.
        remote = os.path.normpath(filename)
        remote = os.path.basename(remote)

        if self.cwd:
            remote = os.path.join(self.cwd, remote)

    # Read in binary mode so uploads are byte-exact on every platform.
    with open(filename, 'rb') as fd:
        data = fd.read()

    log.info("Uploading %r to %r" % (filename,remote))
    self.upload_data(data, remote)

    return remote
def upload_dir(self, local, remote=None):
    """Recursively uploads a directory onto the remote server

    Arguments:
        local: Local directory
        remote: Remote directory
    """
    remote = remote or self.cwd or '.'

    local = os.path.expanduser(local)
    # NOTE(review): `dirname` is computed but never used.
    dirname = os.path.dirname(local)
    basename = os.path.basename(local)

    if not os.path.isdir(local):
        log.error("%r is not a directory" % local)

    msg = "Uploading %r to %r" % (basename,remote)
    with log.waitfor(msg) as w:
        # Generate a tarfile with everything inside of it
        # NOTE(review): tempfile.mktemp is race-prone, and the local
        # tarball is never deleted afterwards.
        local_tar = tempfile.mktemp()
        with tarfile.open(local_tar, 'w:gz') as tar:
            tar.add(local, basename)

        # Upload and extract it
        with context.local(log_level='error'):
            remote_tar = self.mktemp('--suffix=.tar.gz')
            self.upload_file(local_tar, remote_tar)
            untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
            message = untar.recvrepeat(2)

            if untar.wait() != 0:
                log.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
def upload(self, file_or_directory, remote=None):
    """Upload a local file or directory, dispatching on its type.

    Returns whatever the underlying helper returns; silently does
    nothing when the path is neither a file nor a directory.
    """
    if os.path.isfile(file_or_directory):
        return self.upload_file(file_or_directory, remote)
    elif os.path.isdir(file_or_directory):
        return self.upload_dir(file_or_directory, remote)
def download(self, file_or_directory, remote=None):
    """Download a remote file or directory, dispatching on its remote type."""
    if not self.sftp:
        log.error("Cannot determine remote file type without SFTP")

    # `test -d` on the remote side tells us whether this is a directory.
    is_directory = self.system('test -d %s' % file_or_directory).wait() == 0
    if is_directory:
        self.download_dir(file_or_directory, remote)
    else:
        self.download_file(file_or_directory, remote)

#: Alias for :meth:`upload`
put = upload
#: Alias for :meth:`download`
get = download
def libs(self, remote, directory = None):
    """Downloads the libraries referred to by a file.

    This is done by running ldd on the remote server, parsing the output
    and downloading the relevant files.

    Arguments:
        remote(str): Remote path of the binary to inspect.
        directory(str): Where to download the files.  Defaults to
            './$HOSTNAME' where $HOSTNAME is the hostname of the remote
            server.

    Returns:
        dict mapping local file path -> load address.
    """
    libs = self._libs_remote(remote)

    # Resolve symlinks so the binary itself is keyed consistently;
    # the main binary gets a load address of 0.
    remote = self.readlink('-f',remote).strip()
    libs[remote] = 0

    # Idiom fix: identity comparison with None.
    if directory is None:
        directory = self.host

    directory = os.path.realpath(directory)

    res = {}
    seen = set()

    for lib, addr in libs.items():
        local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
        # Refuse any path that escapes the target directory (e.g. '..').
        if not local.startswith(directory):
            log.warning('This seems fishy: %r' % lib)
            continue

        misc.mkdir_p(os.path.dirname(local))

        # Download each distinct remote path only once.
        if lib not in seen:
            self.download_file(lib, local)
            seen.add(lib)

        res[local] = addr

    return res
def interactive(self, shell=None):
    """Create an interactive session.

    This is a simple wrapper for creating a new
    :class:`pwnlib.tubes.ssh.ssh_channel` object and calling
    :meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
    channel = self.shell(shell)

    # Land the user in the configured working directory, if any.
    if self.cwd:
        channel.sendline('cd ' + misc.sh_string(self.cwd))

    channel.interactive()
    channel.close()
def set_working_directory(self, wd = None):
    """Sets the working directory in which future commands will
    be run (via ssh.run) and to which files will be uploaded/downloaded
    from if no path is provided

    Note:
        This uses ``mktemp -d`` under the covers, sets permissions
        on the directory to ``0700``.  This means that setuid binaries
        will **not** be able to access files created in this directory.

        In order to work around this, we also ``chmod +x`` the directory.

    Arguments:
        wd(string): Working directory.  Default is to auto-generate a directory
            based on the result of running 'mktemp -d' on the remote machine.

    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='travis',
        ...         password='demopass')
        >>> cwd = s.set_working_directory()
        >>> s.ls()
        ''
        >>> s.pwd() == cwd
        True
    """
    status = 0

    if not wd:
        # Auto-generate a scratch directory on the remote host.
        wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
        wd = wd.strip()

        if status:
            log.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))

    else:
        # Sanity-check that the requested directory actually exists.
        _, status = self.run_to_end('ls ' + misc.sh_string(wd), wd = '.')

        if status:
            log.error("%r does not appear to exist" % wd)

    log.info("Working directory: %r" % wd)
    self.cwd = wd
    return self.cwd
|
hazelcast_cloud_discovery_test.py | import ssl
import os
import threading
from hazelcast.six.moves import BaseHTTPServer
from hazelcast import six
from unittest import TestCase
from hazelcast.core import Address
from hazelcast.errors import HazelcastCertificationError
from hazelcast.discovery import HazelcastCloudDiscovery
from hazelcast.client import HazelcastClient
from tests.util import get_abs_path
# Tokens recognized by the fake cloud discovery server defined below.
TOKEN = "123abc456"
PRIVATE_LINK_TOKEN = "abc123def"

# URL path prefix (up to and including the '=') that the client queries;
# do_GET below compares the request path against this.
CLOUD_URL = HazelcastCloudDiscovery._CLOUD_URL_PATH

# Canned JSON payloads imitating the Hazelcast Cloud discovery endpoint.
RESPONSE = """[
{"private-address":"10.47.0.8","public-address":"54.213.63.142:32298"},
{"private-address":"10.47.0.9","public-address":"54.245.77.185:32298"},
{"private-address":"10.47.0.10","public-address":"54.186.232.37:32298"}
]"""

PRIVATE_LINK_RESPONSE = """[
{"private-address":"100.96.5.1:5701","public-address":"10.113.44.139:31115"},
{"private-address":"100.96.4.2:5701","public-address":"10.113.44.130:31115"}
]"""

HOST = "localhost"

# Expected parse results: private address -> public address.
ADDRESSES = {
    Address("10.47.0.8", 32298): Address("54.213.63.142", 32298),
    Address("10.47.0.9", 32298): Address("54.245.77.185", 32298),
    Address("10.47.0.10", 32298): Address("54.186.232.37", 32298),
}

PRIVATE_LINK_ADDRESSES = {
    Address("100.96.5.1", 5701): Address("10.113.44.139", 31115),
    Address("100.96.4.2", 5701): Address("10.113.44.130", 31115),
}
class CloudHTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Request handler imitating the Hazelcast Cloud discovery endpoint."""

    def do_GET(self):
        eq_index = self.path.find("=")
        if eq_index <= 0:
            # No '=' in the path: like the original handler, send nothing.
            return

        if self.path[: eq_index + 1] != CLOUD_URL:
            # Wrong URL
            self._set_response(404, "default backend - 404")
            return

        # Found the discovery URL; dispatch on the supplied token.
        token = self.path[eq_index + 1 :]
        if token == TOKEN:
            self._set_response(200, RESPONSE)
        elif token == PRIVATE_LINK_TOKEN:
            self._set_response(200, PRIVATE_LINK_RESPONSE)
        else:
            # Can not find a cluster with the given token
            self._set_response(
                404,
                '{"message":"Cluster with token: ' + self.path[eq_index + 1 :] + ' not found."}',
            )

    def _set_response(self, status, message):
        # Minimal HTTP response: status line, one header, then the body.
        self.send_response(status)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(six.b(message))
class Server(object):
    """TLS-wrapped HTTP server hosting CloudHTTPHandler for the tests."""

    # Directory of this test module; used to locate the TLS key/cert files.
    cur_dir = os.path.dirname(__file__)

    def __init__(self):
        # Port 0 lets the OS choose a free port; the real one is read back
        # from the socket below.
        self.server = BaseHTTPServer.HTTPServer((HOST, 0), CloudHTTPHandler)
        # NOTE(review): ssl.wrap_socket is deprecated since Python 3.7 in
        # favor of SSLContext.wrap_socket -- consider migrating.
        self.server.socket = ssl.wrap_socket(
            self.server.socket,
            get_abs_path(self.cur_dir, "key.pem"),
            get_abs_path(self.cur_dir, "cert.pem"),
            server_side=True,
        )
        self.port = self.server.socket.getsockname()[1]

    def start_server(self):
        # Blocks until shutdown(); intended to run on a background thread.
        self.server.serve_forever()

    def close_server(self):
        self.server.shutdown()
class TestClient(HazelcastClient):
    """HazelcastClient variant that never actually connects."""

    def _start(self):
        # Let the client initialize the cloud address provider and
        # translator, but don't actually start networking.
        pass
class HazelcastCloudDiscoveryTest(TestCase):
    """Exercises HazelcastCloudDiscovery against the local fake endpoint."""

    cur_dir = os.path.dirname(__file__)

    @classmethod
    def setUpClass(cls):
        # Trust the self-signed certificate used by the fake server.
        cls.ctx = ssl.create_default_context(cafile=get_abs_path(cls.cur_dir, "cert.pem"))
        cls.server = Server()
        cls.server_thread = threading.Thread(target=cls.server.start_server)
        cls.server_thread.start()

    @classmethod
    def tearDownClass(cls):
        cls.server.close_server()

    def test_found_response(self):
        """Known token resolves to the standard address map."""
        discovery = create_discovery(HOST, self.server.port, CLOUD_URL, TOKEN)
        discovery._ctx = self.ctx
        addresses = discovery.discover_nodes()
        six.assertCountEqual(self, ADDRESSES, addresses)

    def test_private_link_response(self):
        """Private-link token resolves to the private-link address map."""
        discovery = create_discovery(HOST, self.server.port, CLOUD_URL, PRIVATE_LINK_TOKEN)
        discovery._ctx = self.ctx
        addresses = discovery.discover_nodes()
        six.assertCountEqual(self, PRIVATE_LINK_ADDRESSES, addresses)

    def test_not_found_response(self):
        """Unknown token yields an HTTP 404, surfaced as IOError."""
        discovery = create_discovery(HOST, self.server.port, CLOUD_URL, "INVALID_TOKEN")
        discovery._ctx = self.ctx
        with self.assertRaises(IOError):
            discovery.discover_nodes()

    def test_invalid_url(self):
        """Wrong path yields an HTTP 404, surfaced as IOError."""
        discovery = create_discovery(HOST, self.server.port, "/INVALID_URL", "")
        discovery._ctx = self.ctx
        with self.assertRaises(IOError):
            discovery.discover_nodes()

    def test_invalid_certificates(self):
        """Without the test CA context, the TLS handshake must fail."""
        discovery = create_discovery(HOST, self.server.port, CLOUD_URL, TOKEN)
        with self.assertRaises(HazelcastCertificationError):
            discovery.discover_nodes()

    def test_client_with_cloud_discovery(self):
        """End-to-end: client address provider loads and translates addresses."""
        old = HazelcastCloudDiscovery._CLOUD_URL_BASE
        try:
            # Point the class-level base URL at the fake server.
            HazelcastCloudDiscovery._CLOUD_URL_BASE = "%s:%s" % (HOST, self.server.port)
            client = TestClient(cloud_discovery_token=TOKEN)
            client._address_provider.cloud_discovery._ctx = self.ctx
            private_addresses, secondaries = client._address_provider.load_addresses()
            six.assertCountEqual(self, list(ADDRESSES.keys()), private_addresses)
            six.assertCountEqual(self, secondaries, [])
            for private_address in private_addresses:
                translated_address = client._address_provider.translate(private_address)
                self.assertEqual(ADDRESSES[private_address], translated_address)
        finally:
            # Restore the real URL so other tests are unaffected.
            HazelcastCloudDiscovery._CLOUD_URL_BASE = old
def create_discovery(host, port, url, token, timeout=5.0):
    """Build a HazelcastCloudDiscovery aimed at a local test server.

    The base URL and path are overridden on the instance so that requests go
    to ``host:port`` + ``url`` instead of the real Hazelcast Cloud endpoint.
    """
    instance = HazelcastCloudDiscovery(token, timeout)
    instance._CLOUD_URL_BASE = "%s:%s" % (host, port)
    instance._CLOUD_URL_PATH = url
    return instance
|
app.py | from logging.config import dictConfig
from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS
from kafka_module.kf_service import process_fc_kf
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_error
import routes
import config
import time
import threading
from db.conmgr_mongo import connectmongo
# Flask application object for this service.
tok_app = Flask(__name__)


def start_kafka():
    """Start the Kafka consumer loop (process_fc_kf) on a background thread."""
    try:
        t1 = threading.Thread(target=process_fc_kf, name='keep_on_running')
        t1.start()
        log_info("multithread : Kafka running on multithread", None)
    except Exception as e:
        log_error("multithread : Error while running custom threads", None, e)
# Allow cross-origin requests on the API routes when configured.
if config.ENABLE_CORS:
    cors = CORS(tok_app, resources={r"/api/*": {"origins": "*"}})

# Register every Blueprint exported by the routes module.
for blueprint in vars(routes).values():
    if isinstance(blueprint, Blueprint):
        tok_app.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)

if __name__ == "__main__":
    start_kafka()
    connectmongo()
    tok_app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)

# Log config
# NOTE(review): this dictConfig call sits after tok_app.run(), so when the
# module is executed directly it only takes effect after the server stops;
# consider moving it above the __main__ block.
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {
        'info': {
            'class': 'logging.FileHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'filename': 'info.log'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'stream': 'ext://sys.stdout',
        }
    },
    'loggers': {
        'file': {
            'level': 'DEBUG',
            'handlers': ['info', 'console'],
            # NOTE(review): propagate is usually a bool; '' is falsy here,
            # so messages do not propagate to the root logger — confirm.
            'propagate': ''
        }
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['info', 'console']
    }
})
|
namedpipe.py | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from eventlet import patcher
from oslo_log import log as logging
from os_win._i18n import _
from os_win import constants
from os_win import exceptions
from os_win.utils.io import ioutils
from os_win.utils.winapi import constants as w_const
threading = patcher.original('threading')
time = patcher.original('time')
LOG = logging.getLogger(__name__)
class NamedPipeHandler(object):
    """Handles asynchronous I/O operations on a specified named pipe."""

    _MAX_LOG_ROTATE_RETRIES = 5

    def __init__(self, pipe_name, input_queue=None, output_queue=None,
                 connect_event=None, log_file=None):
        self._pipe_name = pipe_name
        self._input_queue = input_queue      # data to write to the pipe
        self._output_queue = output_queue    # data read from the pipe
        self._log_file_path = log_file
        self._connect_event = connect_event  # signaled when a peer connects
        self._stopped = threading.Event()
        self._workers = []
        self._pipe_handle = None
        self._lock = threading.Lock()        # serializes handle cleanup
        self._ioutils = ioutils.IOUtils()
        self._setup_io_structures()

    def start(self):
        """Open the pipe and spawn the reader (and optionally writer) workers.

        :raises exceptions.OSWinException: if initialization fails; stop()
            is called first so partial state is cleaned up.
        """
        try:
            self._open_pipe()
            if self._log_file_path:
                # Line-buffered append so console output reaches disk promptly.
                self._log_file_handle = open(self._log_file_path, 'ab', 1)
            jobs = [self._read_from_pipe]
            # Writing requires both a source queue and the connect signal.
            if self._input_queue and self._connect_event:
                jobs.append(self._write_to_pipe)
            for job in jobs:
                worker = threading.Thread(target=job)
                worker.setDaemon(True)
                worker.start()
                self._workers.append(worker)
        except Exception as err:
            msg = (_("Named pipe handler failed to initialize. "
                     "Pipe Name: %(pipe_name)s "
                     "Error: %(err)s") %
                   {'pipe_name': self._pipe_name,
                    'err': err})
            LOG.error(msg)
            self.stop()
            raise exceptions.OSWinException(msg)

    def stop(self):
        """Signal workers to stop, cancel outstanding I/O and join workers."""
        self._stopped.set()
        # If any worker has been spawned already, we rely on it to have
        # cleaned up the handles before ending its execution.
        # Note that we expect the caller to synchronize the start/stop calls.
        if not self._workers:
            self._cleanup_handles()
        for worker in self._workers:
            # It may happen that another IO request was issued right after
            # we've set the stopped event and canceled pending requests.
            # In this case, retrying will ensure that the IO workers are
            # stopped properly and that there are no more outstanding IO
            # operations.
            while (worker.is_alive() and
                    worker is not threading.current_thread()):
                self._cancel_io()
                worker.join(0.5)
        self._workers = []

    def _cleanup_handles(self):
        """Close the pipe, log file and overlapped event handles."""
        self._close_pipe()
        if self._log_file_handle:
            self._log_file_handle.close()
            self._log_file_handle = None
        if self._r_overlapped.hEvent:
            self._ioutils.close_handle(self._r_overlapped.hEvent)
            self._r_overlapped.hEvent = None
        if self._w_overlapped.hEvent:
            self._ioutils.close_handle(self._w_overlapped.hEvent)
            self._w_overlapped.hEvent = None

    def _setup_io_structures(self):
        """Allocate buffers, OVERLAPPED structures and completion routines."""
        self._r_buffer = self._ioutils.get_buffer(
            constants.SERIAL_CONSOLE_BUFFER_SIZE)
        self._w_buffer = self._ioutils.get_buffer(
            constants.SERIAL_CONSOLE_BUFFER_SIZE)
        self._r_overlapped = self._ioutils.get_new_overlapped_structure()
        self._w_overlapped = self._ioutils.get_new_overlapped_structure()
        # Reads get a callback to forward data; writes need no callback.
        self._r_completion_routine = self._ioutils.get_completion_routine(
            self._read_callback)
        self._w_completion_routine = self._ioutils.get_completion_routine()
        self._log_file_handle = None

    def _open_pipe(self):
        """Opens a named pipe in overlapped mode for asynchronous I/O."""
        # Wait for the pipe to exist before opening it.
        self._ioutils.wait_named_pipe(self._pipe_name)
        self._pipe_handle = self._ioutils.open(
            self._pipe_name,
            desired_access=(w_const.GENERIC_READ | w_const.GENERIC_WRITE),
            share_mode=(w_const.FILE_SHARE_READ | w_const.FILE_SHARE_WRITE),
            creation_disposition=w_const.OPEN_EXISTING,
            flags_and_attributes=w_const.FILE_FLAG_OVERLAPPED)

    def _close_pipe(self):
        if self._pipe_handle:
            self._ioutils.close_handle(self._pipe_handle)
            self._pipe_handle = None

    def _cancel_io(self):
        """Cancel pending overlapped reads/writes on the pipe handle."""
        if self._pipe_handle:
            # We ignore invalid handle errors. Even if the pipe is closed
            # and the handle reused, by specifying the overlapped structures
            # we ensure that we don't cancel IO operations other than the
            # ones that we care about.
            self._ioutils.cancel_io(self._pipe_handle, self._r_overlapped,
                                    ignore_invalid_handle=True)
            self._ioutils.cancel_io(self._pipe_handle, self._w_overlapped,
                                    ignore_invalid_handle=True)

    def _read_from_pipe(self):
        """Reader worker entry point."""
        self._start_io_worker(self._ioutils.read,
                              self._r_buffer,
                              self._r_overlapped,
                              self._r_completion_routine)

    def _write_to_pipe(self):
        """Writer worker entry point."""
        self._start_io_worker(self._ioutils.write,
                              self._w_buffer,
                              self._w_overlapped,
                              self._w_completion_routine,
                              self._get_data_to_write)

    def _start_io_worker(self, func, buff, overlapped_structure,
                         completion_routine, buff_update_func=None):
        """Loop issuing I/O requests until stopped; clean up handles on exit."""
        try:
            while not self._stopped.isSet():
                if buff_update_func:
                    # Writer path: fetch the next chunk; skip empty reads.
                    num_bytes = buff_update_func()
                    if not num_bytes:
                        continue
                else:
                    num_bytes = len(buff)
                func(self._pipe_handle, buff, num_bytes,
                     overlapped_structure, completion_routine)
        except Exception:
            # Any I/O failure stops the whole handler.
            self._stopped.set()
        finally:
            # Only one worker performs cleanup at a time.
            with self._lock:
                self._cleanup_handles()

    def _read_callback(self, num_bytes):
        """Completion routine: forward read data to the queue and log file."""
        data = self._ioutils.get_buffer_data(self._r_buffer,
                                             num_bytes)
        if self._output_queue:
            self._output_queue.put(data)
        if self._log_file_handle:
            self._write_to_log(data)

    def _get_data_to_write(self):
        """Block until connected, then pull data from the input queue.

        Returns the number of bytes copied into the write buffer (0 if
        stopping or no data).
        """
        while not (self._stopped.isSet() or self._connect_event.isSet()):
            time.sleep(1)
        data = self._input_queue.get()
        if data:
            self._ioutils.write_buffer_data(self._w_buffer, data)
            return len(data)
        return 0

    def _write_to_log(self, data):
        """Append *data* to the log file, rotating when the size cap is hit."""
        if self._stopped.isSet():
            return
        try:
            log_size = self._log_file_handle.tell() + len(data)
            if log_size >= constants.MAX_CONSOLE_LOG_FILE_SIZE:
                self._rotate_logs()
            self._log_file_handle.write(data)
        except Exception:
            self._stopped.set()

    def flush_log_file(self):
        try:
            self._log_file_handle.flush()
        except (AttributeError, ValueError):
            # We'll ignore errors caused by closed handles.
            pass

    def _rotate_logs(self):
        """Archive the current log as <path>.1 and reopen a fresh file."""
        self._log_file_handle.flush()
        self._log_file_handle.close()
        log_archive_path = self._log_file_path + '.1'
        if os.path.exists(log_archive_path):
            self._retry_if_file_in_use(os.remove,
                                       log_archive_path)
        self._retry_if_file_in_use(os.rename,
                                   self._log_file_path,
                                   log_archive_path)
        self._log_file_handle = open(
            self._log_file_path, 'ab', 1)

    def _retry_if_file_in_use(self, f, *args, **kwargs):
        # The log files might be in use if the console log is requested
        # while a log rotation is attempted.
        retry_count = 0
        while True:
            try:
                return f(*args, **kwargs)
            except WindowsError as err:
                if (err.errno == errno.EACCES and
                        retry_count < self._MAX_LOG_ROTATE_RETRIES):
                    retry_count += 1
                    time.sleep(1)
                else:
                    raise
|
utils.py | import threading
import queue
import traceback
from sys import stderr
from tkinter import Tk, Toplevel, PhotoImage
from .codegen import Codegen
guiCodegen = Codegen()
from platform import system as platformName #< for startFile
from subprocess import call as startSubProcess
def startFile(path:str):
    """Open *path* with the platform's default application."""
    os_name = platformName()
    if os_name == "Darwin":
        startSubProcess(("open", path))
    elif os_name == "Windows":
        __import__("os").startfile(path)
    else:
        startSubProcess(("xdg-open", path))  # POSIX
import os.path
def _getAssestDir():
return os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets"))
def _openIcon(name):
    """Load the asset file *name* as a named Tk PhotoImage.

    Requires a Tk root to exist; the image is registered under *name*.
    """
    return PhotoImage(name, {"file": os.path.join(_getAssestDir(), name)})
def thunkify(op, kw_callback="callback", *args, **kwargs):
    '''make a function with named callback param as thunk

    Returns ``lambda cb: op(*args, **kwargs)`` with *cb* injected into the
    keyword arguments under the name *kw_callback*.
    '''
    def addCb(cb, kws):
        # Inject the callback under the configured keyword name.
        kws[kw_callback] = cb
        return kws
    # BUG FIX: the original called addCb(kwargs, cb) with the arguments
    # swapped, which attempted item assignment on the callback itself and
    # raised TypeError; the correct order is (cb, kwargs).
    return lambda cb: op(*args, **addCb(cb, kwargs))
def thunkifySync(op, *args, **kwargs):
    """Turn a synchronous call into a thunk: the returned function runs
    ``op(*args, **kwargs)`` on a fresh thread and feeds the result to the
    callback it receives."""
    def callAsync(cb):
        def worker(pos, named):
            cb(op(*pos, **named))
        threading.Thread(target=worker, args=(args, kwargs)).start()
    return callAsync
# Assertion messages used by EventPoller.
MSG_CALL_FROM_THR_MAIN = "call from main thread."
MSG_CALLED_TWICE = "called twice"
# Raised when thread-safe dispatch is attempted before initLooper().
NOT_THREADSAFE = RuntimeError("call initLooper() first")
# Name of the Tcl command registered to poll the cross-thread call queue.
TCL_CMD_POLLER = "teek_init_threads_queue_poller"
class BackendEnum():
    """A selectable GUI backend: a short *name* plus the module that must be
    importable for the backend to work. Equality and hashing use the name
    only, so two instances with the same name are interchangeable."""
    def __init__(self, name:str, module_name:str):
        self.name = name
        self.module_name = module_name
    def __eq__(self, other):
        return other.name == self.name
    def __hash__(self):
        return self.name.__hash__()
    def isAvaliable(self):  # (sic) original spelling kept for compatibility
        """True when the backing module can be imported."""
        try:
            __import__(self.module_name)
        except ImportError:
            return False
        return True
    def use(self):
        """Select this backend, or fall back to the first available one."""
        global guiBackend
        if not self.isAvaliable():
            next(filter(BackendEnum.isAvaliable, Backend.fallbackOrder)).use()
        else:
            guiBackend = self
    def isUsed(self):
        global guiBackend
        return guiBackend == self
class Backend:
    """Registry of known GUI backends and the preference order used when a
    requested backend is unavailable."""
    Tk = BackendEnum("tk", "tkinter")
    TTk = BackendEnum("ttk", "tkinter.ttk")
    ThemedTk = BackendEnum("themedtk", "ttkthemes")
    Wx = BackendEnum("wx", "wx")
    GTK = BackendEnum("gtk", "gi")
    # Most feature-rich first; plain Tk is the last resort.
    fallbackOrder = [GTK, Wx, ThemedTk, TTk, Tk]


# Currently selected backend (module-wide); changed via BackendEnum.use().
guiBackend = Backend.TTk
class irange:
    '''inclusive (float) range (first, last)+step, (start/stop fields) compatible with range
    when used as irange(n), first defaults to 1, and last is inclusive!
    '''
    def __init__(self, first, last=None, step=1):
        if step == 0:
            raise ValueError("step == 0")
        # With a single argument, count 1..first inclusive.
        self.first = first if last is not None else 1
        self.last = last if last is not None else first
        self.step = step
        # range()-compatible aliases; stop is one unit past the inclusive end.
        self.start = self.first
        self.stop = self.last + (1 if self.step > 0 else -1)

    def __repr__(self):
        rep = "irange(%s, %s" % (self.first, self.last)
        if self.step != 1:
            rep += ", %s" % self.step
        return rep + ")"
    __str__ = __repr__

    def __eq__(self, other):
        return self.first == other.first and self.last == other.last and self.step == other.step

    def __iter__(self):
        # Pure-int ranges delegate to the builtin range for speed.
        if isinstance(self.first, int) and isinstance(self.last, int) and isinstance(self.step, int):
            return iter(range(self.start, self.stop, self.step))
        return irange._iterator(self)

    def __reversed__(self):
        return irange(self.last, self.first, -self.step)

    class _iterator:
        """Fallback iterator for non-integer endpoints or steps."""
        def __init__(self, rng:"irange"):
            self._rng = rng
            self._i = rng.first

        def __next__(self):
            i = self._i
            # BUG FIX: removed a stray debug print(i) that spammed stdout
            # on every iteration.
            stop = (i > self._rng.last) if (self._rng.step > 0) else (i < self._rng.last)  # count down
            if stop:
                raise StopIteration()
            self._i = i + self._rng.step
            return i
class EventCallback:
    """An object that calls functions. Use [bind] / [__add__] or [run]"""
    def __init__(self):
        # Entries are (op, args, kwargs, stack_info), in bind order.
        self._callbacks = []

    class CallbackBreak(Exception): pass
    callbackBreak = CallbackBreak()

    @staticmethod
    def stopChain(): raise EventCallback.callbackBreak

    def isIgnoredFrame(self, frame):
        '''Is a stack trace frame ignored by [bind]'''
        return False

    def bind(self, op, args=(), kwargs={}):
        """Schedule `callback(*args, **kwargs) to [run].

        The call site's stack is captured so that errors raised later in
        [run] can be attributed to the code that registered the callback.
        NOTE(review): the shared mutable kwargs={} default is only safe
        because it is never mutated here.
        """
        stack = traceback.extract_stack()
        while stack and self.isIgnoredFrame(stack[-1]): del stack[-1]
        stack_info = "".join(traceback.format_list(stack))
        self._callbacks.append((op, args, kwargs, stack_info))

    def __add__(self, op):
        self.bind(op); return self

    def remove(self, op):
        """Undo a [bind] call. only [op] is used as its identity, args are ignored"""
        # BUG FIX: the original deleted self._callbacks[len-1-i] — the
        # mirror-image entry — instead of entry i, so removing the first
        # bound callback actually removed the last one.
        for (i, cb) in enumerate(self._callbacks):
            if cb[0] == op:
                del self._callbacks[i]
                return
        raise ValueError("not bound: %r" %op)

    def run(self) -> bool:
        """Run the connected callbacks(ignore result) and print errors. If one callback requested [stopChain], return False"""
        for (op, args, kwargs, stack_info) in self._callbacks:
            try: op(*args, **kwargs)
            except EventCallback.CallbackBreak: return False
            except Exception:
                # it's important that this does NOT call sys.stderr.write directly
                # because sys.stderr is None when running in windows, None.write is error
                (trace, rest) = traceback.format_exc().split("\n", 1)
                print(trace, file=stderr)
                print(stack_info+rest, end="", file=stderr)
                break
        return True
class FutureResult:
    '''A pending operation's result; block on [getValue] / [getValueOr]
    until a producer resolves it with [setValue] or [setError].'''
    def __init__(self):
        self._done = threading.Event()
        self._result = None
        self._exc = None

    def setValue(self, value):
        """Fulfil the future and wake all waiters."""
        self._result = value
        self._done.set()

    def setError(self, exc):
        """Fail the future and wake all waiters."""
        self._exc = exc
        self._done.set()

    def getValueOr(self, on_error):
        """Block until resolved; call on_error(exc) first if it failed,
        then return the (possibly None) value."""
        self._done.wait()
        if self._exc is not None:
            on_error(self._exc)
        return self._result

    def getValue(self):
        return self.getValueOr(FutureResult.rethrow)

    def fold(self, done, fail):
        """Block until resolved, then apply done(value) or fail(error)."""
        self._done.wait()
        if self._exc is None:
            return done(self._result)
        return fail(self._exc)

    @staticmethod
    def rethrow(ex): raise ex
class EventPoller:
    '''after-event loop operation dispatcher for Tk

    Lets worker threads schedule calls onto the Tk main thread through a
    queue that a Tcl timer drains periodically.
    '''
    def __init__(self):
        assert threading.current_thread() is threading.main_thread()
        self._main_thread_ident = threading.get_ident() #< faster than threading.current_thread()
        self._init_looper_done = False
        self._call_queue = queue.Queue() # (func, args, kwargs, future)
        # Declared, not assigned: the owner must provide these before use.
        self.tk:Tk; self.on_quit:EventCallback

    def isThreadMain(self):
        return threading.get_ident() == self._main_thread_ident

    def initLooper(self, poll_interval_ms=(1_000//20) ):
        """Install the Tcl timer that drains the call queue (default ~20x/s).

        Must be called once, from the Tk main thread, before any
        callThreadSafe from other threads.
        """
        assert self.isThreadMain(), MSG_CALL_FROM_THR_MAIN
        assert not self._init_looper_done, MSG_CALLED_TWICE #< there is a race condition, but just ignore this
        timer_id = None
        def poller():
            # Drain everything queued since the last tick, resolving futures.
            nonlocal timer_id
            while True:
                try: item = self._call_queue.get(block=False)
                except queue.Empty: break
                (func, args, kwargs, future) = item
                try: value = func(*args, **kwargs)
                except Exception as ex: future.setError(ex)
                else: future.setValue(value)
            # Re-arm the Tcl "after" timer for the next poll.
            timer_id = self.tk.tk.call("after", poll_interval_ms, TCL_CMD_POLLER)
        self.tk.tk.createcommand(TCL_CMD_POLLER, poller)
        def quit_cancel_poller():
            if timer_id != None: self.tk.after_cancel(timer_id)
        self.on_quit += quit_cancel_poller
        poller()
        self._init_looper_done = True

    def callThreadSafe(self, op, args, kwargs) -> FutureResult:
        """Run op(*args, **kwargs) on the Tk main thread.

        NOTE(review): returns op's result directly when already on the main
        thread, but a FutureResult otherwise — callers must handle both.
        """
        if self.isThreadMain():
            return op(*args, **kwargs)
        if not self._init_looper_done: raise NOT_THREADSAFE
        future = FutureResult()
        self._call_queue.put((op, args, kwargs, future))
        return future
|
cfncluster-release-check.py | #!/usr/bin/python
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy
# of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
# Build a cluster for each combination of region, base_os, and
# scheduler, and run a test script on each cluster. To avoid bouncing
# against limits in each region, the number of simultaneously built
# clusters in each region is a configuration parameter.
#
# NOTE:
# - This script requires python2
# - To simplify this script, at least one subnet in every region
# to be tested must have a resource tag named "CfnClusterTestSubnet"
# (value does not matter). That subnet will be used as the launch
# target for the cluster.
import os
import sys
import subprocess
import threading
import re
import argparse
import Queue
import boto3
#
# configuration
#
# Default SSH login user for each supported base OS AMI.
username_map = { 'alinux' : 'ec2-user',
                 'centos6' : 'centos',
                 'centos7' : 'centos',
                 'ubuntu1404' : 'ubuntu',
                 'ubuntu1604' : 'ubuntu' }
#
# global variables (sigh)
#
setup = {}  # region -> {'vpc': ..., 'subnet': ...}, filled in __main__
results_lock = threading.Lock()  # guards the two counters below
failure = 0
success = 0
#
# run a single test, possibly in parallel
#
def run_test(region, distro, scheduler, key_name):
    """Build one cluster for (region, distro, scheduler), run the check
    script on it over SSH, and always tear the cluster down afterwards.

    Raises on any failure; per-test stdout/stderr go to
    stdout-<testname>.txt / stderr-<testname>.txt.
    """
    testname = '%s-%s-%s' % (region, distro, scheduler)
    test_filename = "config-%s.cfg" % (testname)
    sys.stdout.write("--> %s: Starting\n" % (testname))

    # Write a throwaway cfncluster config for this combination.
    file = open(test_filename, "w")
    file.write("[aws]\n")
    file.write("aws_region_name = %s\n" % region)
    file.write("[cluster default]\n")
    file.write("vpc_settings = public\n")
    file.write("key_name = %s\n" % key_name)
    file.write("base_os = %s\n" % distro)
    file.write("master_instance_type = c4.xlarge\n")
    file.write("compute_instance_type = c4.xlarge\n")
    file.write("initial_queue_size = 2\n")
    file.write("maintain_initial_size = true\n")
    file.write("scheduler = %s\n" % (scheduler))
    file.write("scaling_settings = custom\n")
    file.write("[vpc public]\n")
    file.write("master_subnet_id = %s\n" % (setup[region]['subnet']))
    file.write("vpc_id = %s\n" % (setup[region]['vpc']))
    file.write("[global]\n")
    file.write("cluster_template = default\n")
    file.write("[scaling custom]\n")
    file.write("scaling_adjustment = 2\n")
    file.close()

    stdout_f = open('stdout-%s.txt' % (testname), 'w')
    stderr_f = open('stderr-%s.txt' % (testname), 'w')
    master_ip = ''
    username = username_map[distro]
    try:
        # build the cluster
        subprocess.check_call(['cfncluster', '--config', test_filename,
                               'create', testname],
                              stdout=stdout_f, stderr=stderr_f)

        # get the master ip, which means grepping through cfncluster status gorp
        dump = subprocess.check_output(['cfncluster', 'status', testname], stderr=stderr_f)
        dump_array = dump.splitlines()
        for line in dump_array:
            m = re.search('MasterPublicIP"="(.+?)"', line)
            if m:
                master_ip = m.group(1)
        if master_ip == '':
            print('!! %s: Master IP not found; aborting !!' % (testname))
            raise Exception('Master IP not found')
        print("--> %s master ip: %s" % (testname, master_ip))

        # run test on the cluster...
        subprocess.check_call(['scp', '-o', 'StrictHostKeyChecking=no',
                               'cluster-check.sh', '%s@%s:.' % (username, master_ip)],
                              stdout=stdout_f, stderr=stderr_f)
        subprocess.check_call(['ssh', '-o', 'StrictHostKeyChecking=no',
                               '%s@%s' % (username, master_ip),
                               '/bin/bash cluster-check.sh %s' % (scheduler)],
                              stdout=stdout_f, stderr=stderr_f)
    except Exception as e:
        sys.stdout.write("!! FAILURE: %s!!\n" % (testname))
        raise e
    finally:
        # clean up the cluster regardless of outcome
        subprocess.call(['cfncluster', '--config', test_filename, 'delete', testname],
                        stdout=stdout_f, stderr=stderr_f)
        stdout_f.close()
        stderr_f.close()
        os.remove(test_filename)
#
# worker thread, there will be config['parallelism'] of these running
# per region, dispatching work from the work queue
#
def test_runner(region, q, key_name):
    """Worker loop: pull {distro, scheduler} items off *q*, run each test in
    *region*, and tally the global success/failure counters. Runs forever;
    intended for a daemon thread, with q.join() signalling completion."""
    global success
    global failure
    global results_lock
    while True:
        item = q.get()
        # Guard against anything run_test failed to catch, so one bad test
        # cannot kill the whole worker thread.
        try:
            run_test(region=region, distro=item['distro'],
                     scheduler=item['scheduler'], key_name=key_name)
            failed = False
        except Exception as e:
            print("Unexpected exception %s: %s" % (str(type(e)), str((e))))
            failed = True
        results_lock.acquire(True)
        if failed:
            failure += 1
        else:
            success += 1
        results_lock.release()
        q.task_done()
if __name__ == '__main__':
    # Defaults; any non-None command-line argument below overrides these.
    config = { 'parallelism' : 3,
               'regions' : 'us-east-1,us-east-2,us-west-1,us-west-2,' +
               'ca-central-1,eu-west-1,eu-west-2,eu-central-1,' +
               'ap-southeast-1,ap-southeast-2,ap-northeast-1,' +
               'ap-south-1,sa-east-1,eu-west-3',
               'distros' : 'alinux,centos6,centos7,ubuntu1404,ubuntu1604',
               'schedulers' : 'sge,slurm,torque' }

    parser = argparse.ArgumentParser(description = 'Test runner for CfnCluster')
    parser.add_argument('--parallelism', help = 'Number of tests per region to run in parallel',
                        type = int, default = 3)
    parser.add_argument('--regions', help = 'Comma separated list of regions to test',
                        type = str)
    parser.add_argument('--distros', help = 'Comma separated list of distributions to test',
                        type = str)
    parser.add_argument('--schedulers', help = 'Comma separated list of schedulers to test',
                        type = str)
    parser.add_argument('--key-name', help='Key Pair to use for SSH connections',
                        type = str)

    # NOTE: .iteritems() is Python 2 only (as is the Queue import above),
    # matching the "requires python2" note in the file header.
    for key, value in vars(parser.parse_args()).iteritems():
        if not value == None:
            config[key] = value

    region_list = config['regions'].split(',')
    distro_list = config['distros'].split(',')
    scheduler_list = config['schedulers'].split(',')
    print("==> Regions: %s" % (', '.join(region_list)))
    print("==> Distros: %s" % (', '.join(distro_list)))
    print("==> Schedulers: %s" % (', '.join(scheduler_list)))
    print("==> Key Pair: %s" % (config['key_name']))
    print("==> Parallelism: %d" % (config['parallelism']))

    # Populate subnet / vpc data for all regions we're going to test.
    for region in region_list:
        client = boto3.client('ec2', region_name=region)
        response = client.describe_tags(Filters=[{'Name': 'key',
                                                  'Values': [ 'CfnClusterTestSubnet' ]}],
                                        MaxResults=16)
        if len(response['Tags']) == 0:
            print('Could not find subnet in %s with CfnClusterTestSubnet tag.  Aborting.' %
                  (region))
            exit(1)
        subnetid = response['Tags'][0]['ResourceId']
        response = client.describe_subnets(SubnetIds = [ subnetid ])
        if len(response) == 0:
            print('Could not find subnet info for %s' % (subnetid))
            exit(1)
        vpcid = response['Subnets'][0]['VpcId']
        setup[region] = { 'vpc' : vpcid, 'subnet' : subnetid }

    work_queues = {}
    # build up a per-region list of work to do
    for region in region_list:
        work_queues[region] = Queue.Queue()
        for distro in distro_list:
            for scheduler in scheduler_list:
                work_item = { 'distro' : distro, 'scheduler' : scheduler }
                work_queues[region].put(work_item)

    # start all the workers (daemon threads, so q.join() below gates exit)
    for region in region_list:
        for i in range(0, config['parallelism']):
            t = threading.Thread(target = test_runner, args=(region, work_queues[region], config['key_name']))
            t.daemon = True
            t.start()

    # wait for all the work queues to be completed in each region
    for region in region_list:
        work_queues[region].join()

    # print status...
    print("==> Success: %d" % (success))
    print("==> Failure: %d" % (failure))
|
Coach.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
from six.moves import queue
import pickle
from pickle import Pickler, Unpickler
from random import shuffle
from parl.utils import tensorboard
import numpy as np
from tqdm import tqdm
import parl
from parl.utils import logger
from actor import Actor
from utils import split_group, get_test_dataset
from alphazero_agent import create_agent
class Coach():
"""
This class executes the self-play, learning and evaluating.
"""
def __init__(self, game, args):
self.game = game
self.args = args
# neural network of current generation
self.current_agent = create_agent(self.game)
# neural network of previous generation
self.previous_agent = create_agent(self.game)
# history of examples from args.numItersForTrainExamplesHistory latest iterations
self.trainExamplesHistory = []
self.remote_actors_signal_queues = []
self.remote_actors_return_queue = queue.Queue()
self.test_dataset = get_test_dataset()
def _run_remote_tasks(self, signal_queue, seed):
# The remote actor will actually run on the local machine or other machines of xparl cluster
remote_actor = Actor(self.game, self.args, seed)
while True:
# receive running task signal
# signal: specify task type and task input data (optional)
signal = signal_queue.get()
if signal["task"] == "self-play":
episode_num_each_actor = self.args.numEps // self.args.actors_num
result = remote_actor.self_play(
self.current_agent.get_weights(), episode_num_each_actor)
self.remote_actors_return_queue.put({"self-play": result})
elif signal["task"] == "pitting":
games_num_each_actor = self.args.arenaCompare // self.args.actors_num
result = remote_actor.pitting(
self.previous_agent.get_weights(),
self.current_agent.get_weights(), games_num_each_actor)
self.remote_actors_return_queue.put({"pitting": result})
elif signal["task"] == "evaluate_test_dataset":
test_dataset = signal["test_dataset"]
result = remote_actor.evaluate_test_dataset(
self.current_agent.get_weights(), test_dataset)
self.remote_actors_return_queue.put({
"evaluate_test_dataset":
result
})
else:
raise NotImplementedError
def _create_remote_actors(self):
# connect to xparl cluster to submit jobs
parl.connect(self.args.master_address)
for seed in range(self.args.actors_num):
signal_queue = queue.Queue()
self.remote_actors_signal_queues.append(signal_queue)
remote_thread = threading.Thread(
target=self._run_remote_tasks, args=(signal_queue, seed))
remote_thread.setDaemon(True)
remote_thread.start()
def learn(self):
"""Each iteration:
1. Performs numEps episodes of self-play.
2. Retrains neural network with examples in trainExamplesHistory
(which has a maximum length of numItersForTrainExamplesHistory).
3. Evaluates the new neural network with the test dataset.
4. Pits the new neural network against the old one and accepts it
only if it wins >= updateThreshold fraction of games.
"""
# create remote actors to run tasks (self-play/pitting/evaluate_test_dataset) in parallel.
self._create_remote_actors()
for iteration in range(1, self.args.numIters + 1):
logger.info('Starting Iter #{} ...'.format(iteration))
####################
logger.info('Step1: self-play in parallel...')
iterationTrainExamples = []
# update weights of remote actors to the latest weights, and ask them to run self-play task
for signal_queue in self.remote_actors_signal_queues:
signal_queue.put({"task": "self-play"})
# wait for all remote actors (a total of self.args.actors_num) to return the self-play results
for _ in range(self.args.actors_num):
result = self.remote_actors_return_queue.get()
iterationTrainExamples.extend(result["self-play"])
# save the iteration examples to the history
self.trainExamplesHistory.append(iterationTrainExamples)
if len(self.trainExamplesHistory
) > self.args.numItersForTrainExamplesHistory:
logger.warning("Removing the oldest entry in trainExamples.")
self.trainExamplesHistory.pop(0)
self.saveTrainExamples(iteration) # backup history to a file
####################
logger.info('Step2: train neural network...')
# shuffle examples before training
trainExamples = []
for e in self.trainExamplesHistory:
trainExamples.extend(e)
shuffle(trainExamples)
# training new network, keeping a copy of the old one
self.current_agent.save(
os.path.join(self.args.checkpoint, 'temp.pth.tar'))
self.previous_agent.restore(
os.path.join(self.args.checkpoint, 'temp.pth.tar'))
self.current_agent.learn(trainExamples)
####################
logger.info('Step3: evaluate test dataset in parallel...')
cnt = 0
# update weights of remote actors to the latest weights, and ask them to evaluate assigned test dataset
for i, data in enumerate(
split_group(
self.test_dataset,
len(self.test_dataset) // self.args.actors_num)):
self.remote_actors_signal_queues[i].put({
"task":
"evaluate_test_dataset",
"test_dataset":
data
})
cnt += len(data)
perfect_moves_cnt, good_moves_cnt = 0, 0
# wait for all remote actors (a total of self.args.actors_num) to return the evaluating results
for _ in range(self.args.actors_num):
(perfect_moves,
good_moves) = self.remote_actors_return_queue.get(
)["evaluate_test_dataset"]
perfect_moves_cnt += perfect_moves
good_moves_cnt += good_moves
logger.info('perfect moves rate: {}, good moves rate: {}'.format(
perfect_moves_cnt / cnt, good_moves_cnt / cnt))
tensorboard.add_scalar('perfect_moves_rate',
perfect_moves_cnt / cnt, iteration)
tensorboard.add_scalar('good_moves_rate', good_moves_cnt / cnt,
iteration)
####################
logger.info(
'Step4: pitting against previous generation in parallel...')
# transfer weights of previous generation and current generation to the remote actors, and ask them to pit.
for signal_queue in self.remote_actors_signal_queues:
signal_queue.put({"task": "pitting"})
previous_wins, current_wins, draws = 0, 0, 0
for _ in range(self.args.actors_num):
(pwins_, cwins_,
draws_) = self.remote_actors_return_queue.get()["pitting"]
previous_wins += pwins_
current_wins += cwins_
draws += draws_
logger.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' %
(current_wins, previous_wins, draws))
if previous_wins + current_wins == 0 or float(current_wins) / (
previous_wins + current_wins) < self.args.updateThreshold:
logger.info('REJECTING NEW MODEL')
self.current_agent.restore(
os.path.join(self.args.checkpoint, 'temp.pth.tar'))
else:
logger.info('ACCEPTING NEW MODEL')
self.current_agent.save(
os.path.join(self.args.checkpoint, 'best.pth.tar'))
self.current_agent.save(
os.path.join(self.args.checkpoint,
self.getCheckpointFile(iteration)))
def getCheckpointFile(self, iteration):
    """Return the checkpoint filename used for the given training iteration."""
    return 'checkpoint_{}.pth.tar'.format(iteration)
def saveTrainExamples(self, iteration):
    """Pickle the accumulated self-play examples next to the checkpoints.

    Writes '<checkpoint_N.pth.tar>.examples' inside self.args.checkpoint,
    creating the folder if it does not exist.
    """
    folder = self.args.checkpoint
    if not os.path.exists(folder):
        os.makedirs(folder)
    filename = os.path.join(
        folder,
        self.getCheckpointFile(iteration) + ".examples")
    # The 'with' block closes the file; the original's trailing bare
    # 'f.closed' expression read an attribute without effect and is removed.
    with open(filename, "wb+") as f:
        Pickler(f).dump(self.trainExamplesHistory)
def loadModel(self):
    """Restore the current agent's weights from the configured folder/file pair."""
    checkpoint_path = os.path.join(self.args.load_folder_file[0],
                                   self.args.load_folder_file[1])
    self.current_agent.restore(checkpoint_path)
def loadTrainExamples(self):
    """Load self-play training examples saved alongside the model checkpoint.

    Looks for '<model file>.examples' next to the configured model file.
    If it is missing, asks the user interactively whether to continue
    (exiting on anything but 'y'); otherwise unpickles the file into
    self.trainExamplesHistory.
    """
    modelFile = os.path.join(self.args.load_folder_file[0],
                             self.args.load_folder_file[1])
    examplesFile = modelFile + ".examples"
    if not os.path.isfile(examplesFile):
        logger.warning(
            "File {} with trainExamples not found!".format(examplesFile))
        r = input("Continue? [y|n]")
        if r != "y":
            sys.exit()
    else:
        logger.info("File with trainExamples found. Loading it...")
        with open(examplesFile, "rb") as f:
            self.trainExamplesHistory = Unpickler(f).load()
        logger.info('Loading done!')
|
raspi0.py | #!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Joy detection demo."""
import argparse
import collections
import contextlib
import io
import logging
import math
import os
import queue
import serial
import signal
import sys
import threading
import time
from PIL import Image, ImageDraw, ImageFont
from picamera import PiCamera
import RPi.GPIO as GPIO
from time import sleep
from aiy.board import Board
from aiy.leds import Color, Leds, Pattern, PrivacyLed
from aiy.toneplayer import TonePlayer
from aiy.vision.inference import CameraInference
from aiy.vision.models import face_detection
from aiy.vision.streaming.server import StreamingServer
from aiy.vision.streaming import svg
logger = logging.getLogger(__name__)
# LED colors blended according to the smoothed joy score.
JOY_COLOR = (255, 70, 0)
SAD_COLOR = (0, 0, 64)
# Hysteresis thresholds for the joy score crossing detector.
JOY_SCORE_HIGH = 0.85
JOY_SCORE_LOW = 0.10
# Buzzer tone sequences (note + duration strings for TonePlayer).
JOY_SOUND = ('C5q', 'E5q', 'C6q')
SAD_SOUND = ('C6q', 'E5q', 'C5q')
MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
BEEP_SOUND = ('E6q', 'C6q')
FONT_FILE = '/usr/share/fonts/truetype/freefont/FreeSans.ttf'
BUZZER_GPIO = 22
GPIO.setmode(GPIO.BCM)
# NOTE(review): serial device path and baud rate are hard-coded — confirm the
# companion board always enumerates as /dev/ttyACM1 on this setup.
ser = serial.Serial('/dev/ttyACM1', 9600)
@contextlib.contextmanager
def stopwatch(message):
    """Context manager that logs *message* on entry and the elapsed time on exit."""
    logger.info('%s...', message)
    started = time.monotonic()
    try:
        yield
    finally:
        elapsed = time.monotonic() - started
        logger.info('%s done. (%fs)', message, elapsed)
def run_inference(num_frames, on_loaded):
    """Yields (faces, (frame_width, frame_height)) tuples.

    Calls *on_loaded* once the face-detection model is ready, then yields one
    tuple per processed camera frame.
    """
    with CameraInference(face_detection.model()) as inference:
        on_loaded()
        for result in inference.run(num_frames):
            faces = face_detection.get_faces(result)
            yield faces, (result.width, result.height)
def threshold_detector(low_threshold, high_threshold):
    """Coroutine: send scores in, receive 'high'/'low' crossing events or None.

    Emits 'high' when the score strictly crosses above high_threshold,
    'low' when it strictly crosses below low_threshold, None otherwise.
    """
    assert low_threshold < high_threshold

    event = None
    prev_score = 0.0
    while True:
        score = yield event
        rose_above = score > high_threshold and prev_score < high_threshold
        fell_below = score < low_threshold and prev_score > low_threshold
        if rose_above:
            event = 'high'
        elif fell_below:
            event = 'low'
        else:
            event = None
        prev_score = score
def moving_average(size):
    """Coroutine: send values in, receive the mean of the last *size* values.

    The very first send (the priming send(None)) yields 0.0.
    """
    samples = collections.deque(maxlen=size)
    value = yield 0.0
    while True:
        samples.append(value)
        value = yield sum(samples) / len(samples)
def average_joy_score(faces):
    """Mean joy score over the detected faces, or 0.0 when there are none."""
    if not faces:
        return 0.0
    total = sum(face.joy_score for face in faces)
    return total / len(faces)
def draw_rectangle(draw, x0, y0, x1, y1, border, fill=None, outline=None):
    """Draw a rectangle with a thick border by layering concentric rectangles."""
    assert border % 2 == 1
    offsets = range(-border // 2, border // 2 + 1)
    for offset in offsets:
        box = (x0 + offset, y0 + offset, x1 - offset, y1 - offset)
        draw.rectangle(box, fill=fill, outline=outline)
def scale_bounding_box(bounding_box, scale_x, scale_y):
    """Scale an (x, y, w, h) bounding box by per-axis factors."""
    x, y, w, h = bounding_box
    return (scale_x * x, scale_y * y, scale_x * w, scale_y * h)
def svg_overlay(faces, frame_size, joy_score):
    """Render an SVG overlay: one labelled box per face plus a summary line."""
    width, height = frame_size
    doc = svg.Svg(width=width, height=height)

    for face in faces:
        x, y, w, h = face.bounding_box
        box = svg.Rect(x=int(x), y=int(y), width=int(w), height=int(h), rx=10, ry=10,
                       fill_opacity=0.3 * face.face_score,
                       style='fill:red;stroke:white;stroke-width:4px')
        doc.add(box)
        label = svg.Text('Joy: %.2f' % face.joy_score, x=x, y=y - 10,
                         fill='red', font_size=30)
        doc.add(label)

    summary = svg.Text('Faces: %d Avg. joy: %.2f' % (len(faces), joy_score),
                       x=10, y=50, fill='red', font_size=40)
    doc.add(summary)
    return str(doc)
class Service:
    """Base class: a daemon worker thread consuming requests from a queue.

    Subclasses override process() (handle one request) and optionally
    shutdown() (cleanup when the service stops). A None request is the
    internal sentinel that stops the worker loop.
    """

    def __init__(self):
        self._requests = queue.Queue()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        # Worker loop: process requests until the None sentinel arrives.
        while True:
            request = self._requests.get()
            if request is None:
                self.shutdown()
                break
            self.process(request)
            self._requests.task_done()

    def process(self, request):
        """Handle a single queued request (override in subclasses)."""
        pass

    def shutdown(self):
        """Cleanup hook invoked when the service stops (override as needed)."""
        pass

    def submit(self, request):
        """Enqueue a request for asynchronous processing."""
        self._requests.put(request)

    def close(self):
        """Stop the worker thread and wait for it to finish."""
        self._requests.put(None)
        self._thread.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
class Player(Service):
    """Controls buzzer."""

    def __init__(self, gpio, bpm):
        super().__init__()
        self._toneplayer = TonePlayer(gpio, bpm)

    def process(self, sound):
        # Runs on the service thread, so playback never blocks the caller.
        self._toneplayer.play(*sound)

    def play(self, sound):
        """Queue a tone sequence (e.g. JOY_SOUND) for asynchronous playback."""
        self.submit(sound)
class Photographer(Service):
    """Saves photographs to disk.

    Two kinds of queued messages: a (faces, frame_size) tuple updates the
    cached detection result; any other message is treated as a camera
    instance and triggers a capture (plus an annotated copy when faces
    were cached).
    """

    def __init__(self, format, folder):
        super().__init__()
        assert format in ('jpeg', 'bmp', 'png')
        self._font = ImageFont.truetype(FONT_FILE, size=25)
        # Latest known detection result: (faces, (frame_width, frame_height)).
        self._faces = ([], (0, 0))
        self._format = format
        self._folder = folder

    def _make_filename(self, timestamp, annotated):
        """Build the (expanded) output path for a plain or annotated photo."""
        path = '%s/%s_annotated.%s' if annotated else '%s/%s.%s'
        return os.path.expanduser(path % (self._folder, timestamp, self._format))

    def _draw_face(self, draw, face, scale_x, scale_y):
        """Draw one face's bounding box with a joy-score caption below it."""
        x, y, width, height = scale_bounding_box(face.bounding_box, scale_x, scale_y)
        text = 'Joy: %.2f' % face.joy_score
        _, text_height = self._font.getsize(text)
        margin = 3
        bottom = y + height
        text_bottom = bottom + margin + text_height + margin
        draw_rectangle(draw, x, y, x + width, bottom, 3, outline='white')
        draw_rectangle(draw, x, bottom, x + width, text_bottom, 3, fill='white', outline='white')
        draw.text((x + 1 + margin, y + height + 1 + margin), text, font=self._font, fill='black')

    def process(self, message):
        # Tuple => update cached faces; anything else => camera to shoot with.
        if isinstance(message, tuple):
            self._faces = message
            return

        camera = message
        timestamp = time.strftime('%Y-%m-%d_%H.%M.%S')

        stream = io.BytesIO()
        with stopwatch('Taking photo'):
            camera.capture(stream, format=self._format, use_video_port=True)

        filename = self._make_filename(timestamp, annotated=False)
        with stopwatch('Saving original %s' % filename):
            stream.seek(0)
            with open(filename, 'wb') as file:
                file.write(stream.read())

        faces, (width, height) = self._faces
        if faces:
            filename = self._make_filename(timestamp, annotated=True)
            with stopwatch('Saving annotated %s' % filename):
                stream.seek(0)
                image = Image.open(stream)
                draw = ImageDraw.Draw(image)
                # Capture resolution may differ from inference resolution.
                scale_x, scale_y = image.width / width, image.height / height
                for face in faces:
                    self._draw_face(draw, face, scale_x, scale_y)
                del draw
                image.save(filename)

    def update_faces(self, faces):
        """Queue the latest (faces, frame_size) tuple for future captures."""
        self.submit(faces)

    def shoot(self, camera):
        """Queue a capture request using the given camera."""
        self.submit(camera)
class Animator(Service):
    """Controls RGB LEDs."""

    def __init__(self, leds):
        super().__init__()
        self._leds = leds

    def process(self, joy_score):
        # Blend between JOY_COLOR and SAD_COLOR proportionally to the score;
        # a zero (or negative) score switches the LEDs off.
        if joy_score > 0:
            self._leds.update(Leds.rgb_on(Color.blend(JOY_COLOR, SAD_COLOR, joy_score)))
        else:
            self._leds.update(Leds.rgb_off())

    def shutdown(self):
        # Make sure LEDs are dark when the service stops.
        self._leds.update(Leds.rgb_off())

    def update_joy_score(self, joy_score):
        """Queue an LED update for the given joy score."""
        self.submit(joy_score)
def joy_detector(num_frames, preview_alpha, image_format, image_folder,
                 enable_streaming, streaming_bitrate, mdns_name):
    """Run the joy-detection loop until *num_frames* frames or SIGINT/SIGTERM.

    Wires the camera inference stream to the LED animator, buzzer player,
    photographer, optional streaming server, and the external serial device
    (which receives 2/1/0 for happy/sad/neutral events).
    """
    done = threading.Event()

    def stop():
        logger.info('Stopping...')
        done.set()

    signal.signal(signal.SIGINT, lambda signum, frame: stop())
    signal.signal(signal.SIGTERM, lambda signum, frame: stop())

    logger.info('Starting...')
    with contextlib.ExitStack() as stack:
        leds = stack.enter_context(Leds())
        board = stack.enter_context(Board())
        player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
        photographer = stack.enter_context(Photographer(image_format, image_folder))
        animator = stack.enter_context(Animator(leds))
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        # Use half of that for video streaming (820x616).
        camera = stack.enter_context(PiCamera(sensor_mode=4, resolution=(820, 616)))
        stack.enter_context(PrivacyLed(leds))

        server = None
        if enable_streaming:
            server = stack.enter_context(StreamingServer(camera, bitrate=streaming_bitrate,
                                                         mdns_name=mdns_name))

        def model_loaded():
            logger.info('Model loaded.')
            player.play(MODEL_LOAD_SOUND)

        def take_photo():
            logger.info('Button pressed.')
            player.play(BEEP_SOUND)
            photographer.shoot(camera)

        if preview_alpha > 0:
            camera.start_preview(alpha=preview_alpha)
        board.button.when_pressed = take_photo

        joy_moving_average = moving_average(10)
        joy_moving_average.send(None)  # Initialize.
        joy_threshold_detector = threshold_detector(JOY_SCORE_LOW, JOY_SCORE_HIGH)
        joy_threshold_detector.send(None)  # Initialize.
        for faces, frame_size in run_inference(num_frames, model_loaded):
            photographer.update_faces((faces, frame_size))
            joy_score = joy_moving_average.send(average_joy_score(faces))
            animator.update_joy_score(joy_score)
            event = joy_threshold_detector.send(joy_score)
            if event == 'high':
                logger.info('High joy detected.')
                player.play(JOY_SOUND)
                # BUG FIX: pySerial 3.x requires bytes; writing str raised TypeError.
                ser.write(b'2')  # 2 means happy
            elif event == 'low':
                logger.info('Low joy detected.')
                player.play(SAD_SOUND)
                ser.write(b'1')  # 1 means sad
            else:
                ser.write(b'0')  # 0 means neutral / no event
            if server:
                server.send_overlay(svg_overlay(faces, frame_size, joy_score))
            if done.is_set():
                break
def preview_alpha(string):
    """Argparse type converter: an integer constrained to [0, 255]."""
    value = int(string)
    if not 0 <= value <= 255:
        raise argparse.ArgumentTypeError('Must be in [0...255] range.')
    return value
def main():
    """Parse CLI arguments and run the joy detector until done or interrupted."""
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (kbps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format, args.image_folder,
                     args.enable_streaming, args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        # Release GPIO pins that were claimed at module import time.
        GPIO.cleanup()
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)
    return 0
if __name__ == '__main__':
sys.exit(main()) |
motelist.py | #!/usr/bin/env python
import os, sys, threading, time, itertools
import serial
try:
# For Python 3+
from urllib.request import urlopen
except ImportError:
# For Python 2.x
from urllib2 import urlopen
import configfile
if os.name == 'posix':
from motelist_src.get_ports_linux import comports # @UnusedImport
elif os.name == "cygwin":
from motelist_src.get_ports_cygwin import comports # @Reimport @UnusedImport
elif os.name == "nt":
from motelist_src.get_ports_win import comports # @Reimport @UnusedImport
elif os.name == 'darwin': # OS X (confirmed) TODO: Not tested
from motelist_src.get_ports_mac import comports # @Reimport
else:
print ("Your OS ('{}') is not supported!".format(os.name))
exit()
# Unified way of accessing motes
class Mote(object):
    """One mote entry: port, display name, reference and (optional) remote host.

    Name-mangled (double-underscore) attributes keep the fields private;
    all access goes through the getters below. Motes sort and compare by
    (host, port) — see cmp().
    """

    def __init__(self, mote, manualyAdded = False):
        # mote is None (placeholder entry), a 3-element [port, name, reference]
        # list (local mote), or a 4-element [..., host] list (remote mote).
        if mote == None:
            self.__port = None
            self.__name = "No motes found!"
            self.__reference = "Make sure mote(s) are connected and drivers are installed."
            self.__host = None
            self.__userdata = None
            self.__manualyAdded = manualyAdded
        elif len(mote) == 3:
            self.__port = mote[0]
            self.__name = mote[1]
            self.__reference = mote[2]
            self.__host = "Local"
            self.__userdata = None
            self.__manualyAdded = manualyAdded
        elif len(mote) == 4:
            self.__port = mote[0]
            self.__name = mote[1]
            self.__reference = mote[2]
            self.__host = mote[3]
            self.__userdata = None
            self.__manualyAdded = manualyAdded
        else:
            print ("Failed to initialize mote from " + str(mote))

    def getNiceName(self):
        """Readable name; includes port/host only when they add information."""
        if self.__host is None:
            if self.__name.find(self.__port) != -1:
                return "{}".format(self.__name)
            else:
                return "{}({})".format(self.__name, self.__port)
        else:
            if self.__name.find(self.__port) != -1:
                return "{} @ {}".format(self.__name, self.__host)
            else:
                return "{}({}) @ {}".format(self.__name, self.__port, self.__host)

    def getFullName(self):
        """Nice name plus the reference in square brackets."""
        return "{} [{}]".format(self.getNiceName(), self.__reference)

    def getCSVData(self):
        """Comma-separated: reference,port,name[,host]."""
        if self.__host is None:
            return "{},{},{}".format(self.__reference, self.__port, self.__name)
        else:
            return "{},{},{},{}".format(self.__reference, self.__port, self.__name, self.__host)

    def isUserMote(self):
        # True when the mote was added manually rather than auto-detected.
        return self.__manualyAdded

    def setUserData(self, userData):
        self.__userdata = userData

    def getUserData(self):
        return self.__userdata

    def getPort(self):
        return self.__port

    def getHost(self):
        # Empty string (not None) for the placeholder/no-host case.
        if self.__host is None:
            return ''
        else:
            return self.__host

    def isLocal(self):
        return self.__host is None \
            or len(self.__host) == 0 \
            or self.__host == "Local"

    def getName(self):
        return self.__name

    def getReference(self):
        return self.__reference

    def cmp(self, other):
        """Three-way compare by (host, port); 'Local' and '' hosts are treated equal."""
        host1 = self.getHost()
        host2 = other.getHost()
        if host1 == "Local": host1 = ""
        if host2 == "Local": host2 = ""
        # Compare hosts without a scheme prefix such as "http://".
        if host1.find("://") != -1: host1 = host1[host1.find("://") + 3:]
        if host2.find("://") != -1: host2 = host2[host2.find("://") + 3:]
        if host1 == host2:
            if self.getPort() == other.getPort(): return 0
            if self.getPort() < other.getPort(): return -1
            return 1
        if host1 < host2: return -1
        return 1

    def __lt__(self, other):
        return self.cmp(other) < 0

    def __gt__(self, other):
        return self.cmp(other) > 0

    def __le__(self, other):
        return self.cmp(other) <= 0

    def __ge__(self, other):
        return self.cmp(other) >= 0

    def __eq__(self, other):
        # Makes equal work on different mote classes
        if type(other) is not type(self): return False
        return self.cmp(other) == 0

    def __ne__(self, other):
        if type(other) is not type(self): return True
        return self.cmp(other) != 0
def getRemoteServers():
    """Collect remote BSL proxy servers, best-effort.

    Sources, in order: the BSLPROXY environment variable (if set and
    non-empty), then the 'remoteServers' list from remoteServers.cfg.
    Either source failing is non-fatal and simply skipped.
    """
    retVal = list()
    # os.environ.get avoids the KeyError the original swallowed with a bare except.
    bslProxy = os.environ.get('BSLPROXY')
    if bslProxy:
        retVal.append(bslProxy)
    try:
        cfg = configfile.ConfigFile("remoteServers.cfg")
        cfg.load()
        retVal += cfg.getCfgValueAsList("remoteServers")
    except Exception:
        # Missing or unreadable config file is fine — no remote servers then.
        pass
    return retVal
class Motelist(object):
    """Registry of connected motes with optional periodic refresh and callbacks.

    All state is class-level (the class is used as a singleton via static
    methods); the shared mote list is guarded by `lock`.
    """

    motes = list()
    lock = threading.Lock()
    updateCallbacks = list()
    remoteServerList = getRemoteServers()
    infinite = False

    @staticmethod
    def initialize(updateCallbacks, startPeriodicUpdate = False, onlyLocalMotes = False):
        """Register callback(s) and optionally start the background refresh thread."""
        if updateCallbacks is None:
            return
        if type(updateCallbacks) is list:
            Motelist.updateCallbacks = updateCallbacks
        else:
            Motelist.updateCallbacks.append(updateCallbacks)
        if startPeriodicUpdate:
            Motelist.startPeriodicUpdate()
        if onlyLocalMotes:
            Motelist.remoteServerList = list()

    @staticmethod
    def addMote(port, name, reference):
        """Manually add a mote on *port*; returns True if it was added.

        The mote is added only when the port can actually be opened
        (portExists) and no already-known mote uses the same port.
        """
        Motelist.lock.acquire()
        portFound = not Motelist.portExists(port)
        for mote in Motelist.motes:
            if mote.getPort().lower() == port.lower():
                portFound = True
                break
        if not portFound:
            Motelist.motes.append(Mote([port, name, reference], True))
        Motelist.lock.release()
        # Callbacks are fired outside the lock to avoid deadlocks.
        if not portFound:
            Motelist.__activateCallbacks(True)
        return not portFound

    @staticmethod
    def recreateMoteList(iterator):
        """Rebuild the mote list from local ports + remote servers; True if it changed."""
        Motelist.lock.acquire()
        newMotes = list()
        haveNewMote = False
        for host in Motelist.remoteServerList:
            iterator = itertools.chain(iterator, Motelist.getRemoteMotelist(host))
        for mote in iterator:
            # this filters out fake motes on linux, i hope!
            if mote[2] == "n/a":
                continue
            # this filters out some ACPI devices
            if "ACPI" in mote[2]:
                continue
            newMote = Mote(mote)
            # Skip duplicates already collected in this pass.
            for m in newMotes:
                if newMote == m:
                    newMote = None
                    break;
            if newMote is None:
                continue
            # Add if no such mote exists, point to it otherwise
            if newMote not in Motelist.motes:
                newMotes.insert(0, newMote)
                haveNewMote = True
            else:
                newMotes.insert(0, Motelist.motes[Motelist.motes.index(newMote)])
        # Keep manually added motes even if they are not currently detected.
        for mote in Motelist.motes:
            if mote.isUserMote() and mote not in newMotes:
                newMotes.append(mote)
        haveNewMote = haveNewMote or not len(Motelist.motes) == len(newMotes)
        Motelist.motes = sorted(newMotes)
        Motelist.lock.release()
        return haveNewMote

    @staticmethod
    def getRemoteMotelist(host):
        """Query a remote server's /ports endpoint and parse its mote list."""
        retVal = list()
        if host.find("://") == -1:
            # assume http by default
            url = "http://" + host
        else:
            url = host
        try:
            req = urlopen(url + "/ports")
            motes = req.read().split("\n")
            for mote in motes:
                # Each line is "reference,port,name".
                info = mote.split(",")
                if len(info) < 3:
                    continue
                retVal.append([info[1], info[2], info[0], host])
        except Exception as e:
            print ("Exception while getting remote motelist: ", e)
        return retVal

    @staticmethod
    def getMotelist(update):
        """Return a snapshot copy of the mote list, optionally refreshing first."""
        if update:
            Motelist.updateMotelist(False)
        Motelist.lock.acquire()
        # return a copy of connected list
        retVal = list(Motelist.motes)
        Motelist.lock.release()
        return retVal

    @staticmethod
    def getMoteByUserData(userData):
        """Return all motes whose user data equals *userData*."""
        Motelist.lock.acquire()
        result = list()
        for mote in Motelist.motes:
            if mote.getUserData() == userData:
                result.append(mote)
        Motelist.lock.release()
        return result

    @staticmethod
    def addUpdateCallback(callback):
        """Register an extra callback fired after each list refresh."""
        Motelist.updateCallbacks.append(callback)

    @staticmethod
    def removeUpdateCallback(callback):
        """Unregister a callback; ignores callbacks that were never added."""
        try:
            Motelist.updateCallbacks.remove(callback)
        except:
            pass

    @staticmethod
    def updateMotelist(infinite):
        """Refresh once, or loop refreshing every second while `infinite` stays set."""
        Motelist.infinite = infinite
        Motelist.__activateCallbacks()
        while Motelist.infinite:
            time.sleep(1)
            Motelist.__activateCallbacks()

    @staticmethod
    def startPeriodicUpdate():
        """Start the once-per-second refresh loop on a daemon thread."""
        thread = threading.Thread(target = Motelist.updateMotelist, args = (True,),
                                  name = "Motelist thread")
        # Must have, if we don't plan on joining this thread
        thread.daemon = True
        thread.start()

    @staticmethod
    def stopPeriodicUpdate():
        """Signal the refresh loop (if any) to stop after its current sleep."""
        Motelist.infinite = False

    @staticmethod
    def __activateCallbacks(force = False):
        """Refresh the list and notify callbacks when it changed (or *force*)."""
        # If no new motes added, no need to call callbacks
        if not Motelist.recreateMoteList(comports()) and not force:
            return
        # Snapshot the callbacks under the lock, call them outside it.
        Motelist.lock.acquire()
        updateCallbackTempList = list(Motelist.updateCallbacks)
        Motelist.lock.release()
        for x in updateCallbackTempList:
            try:
                x()
            except Exception as e:
                print ("Exception while calling callback: ", e)

    @staticmethod
    def portExists(port):
        """Probe *port* by opening it; True if it can be opened as a serial port."""
        try:
            ser = serial.Serial(port, 38400, timeout = 0, parity = serial.PARITY_NONE, rtscts = 1)
            while True:
                ser.write("")
                ser.close()
                return True
        except serial.SerialException as msg:
            return False

    @staticmethod
    def getMotesByName(name):
        """Return motes whose name contains *name* (refreshes the list first)."""
        return list(m for m in Motelist.getMotelist(True) if name in m.getName())

    @staticmethod
    def printMotelist():
        """Print a formatted table of all currently detected motes."""
        motelist = Motelist.getMotelist(True)
        if len(motelist) == 0:
            print ("No attached motes found!")
            return
        # Prepare table column width
        lengths = [len("Reference"), len("Port"), len("Host"), len("Name")]
        for mote in motelist:
            lengths[0] = max(lengths[0], len(mote.getReference()))
            lengths[1] = max(lengths[1], len(mote.getPort()))
            lengths[2] = max(lengths[2], len(mote.getHost()))
            lengths[3] = max(lengths[3], len(mote.getName()))
        # Print header
        print ("{} {} {} {}".format("Reference".ljust(lengths[0]),
                                    "Port".ljust(lengths[1]),
                                    "Host".ljust(lengths[2]),
                                    "Name".ljust(lengths[3])))
        # Print separator
        print ("{} {} {} {}".format("".ljust(lengths[0], "-"),
                                    "".ljust(lengths[1], "-"),
                                    "".ljust(lengths[2], "-"),
                                    "".ljust(lengths[3], "-")))
        # Print motelist
        for mote in motelist:
            print ("{} {} {} {}".format(mote.getReference().ljust(lengths[0]),
                                        mote.getPort().ljust(lengths[1]),
                                        mote.getHost().ljust(lengths[2]),
                                        mote.getName().ljust(lengths[3])))
def main():
    """Entry point: handle -c (CSV output) and -h flags, else print the table."""
    arguments = sys.argv[1:]
    for argument in arguments:
        if argument == "-c":
            for mote in Motelist.getMotelist(True):
                print (mote.getCSVData())
            sys.exit(1)
        if argument == "-h":
            print ("Use motelist.py -c for CSV data.")
            sys.exit(1)
    Motelist.printMotelist()
# BUG FIX: DEBUG was referenced below but never defined anywhere in the file,
# so a KeyboardInterrupt raised NameError instead of the friendly message.
DEBUG = False  # set True to re-raise KeyboardInterrupt with a full traceback

if __name__ == '__main__':
    try:
        main()
    except SystemExit:
        raise  # let pass exit() calls
    except KeyboardInterrupt:
        if DEBUG: raise  # show full trace in debug mode
        sys.stderr.write("user abort.\n")  # short message in user mode
        sys.exit(1)
|
repairer.py | # Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2016
# - Vincent Garonne <vgaronne@gmail.com>, 2014-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2015
"""
Judge-Repairer is a daemon to repair stuck replication rules.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from re import match
from random import randint
from sqlalchemy.exc import DatabaseError
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import repair_rule, get_stuck_rules
from rucio.core.monitor import record_counter
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_repairer(once=False):
    """
    Main loop to check for STUCK replication rules.

    :param once: run a single pass and return instead of looping until
                 graceful_stop is set.

    Note: the Python-2-only 'except X, e' syntax has been modernized to
    'except X as e', which is valid on Python 2.6+ and Python 3.
    """
    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()
    paused_rules = {}  # {rule_id: datetime}

    # Make an initial heartbeat so that all judge-repairers have the correct worker number on the next try
    live(executable='rucio-judge-repairer', hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
    graceful_stop.wait(1)

    while not graceful_stop.is_set():
        try:
            # heartbeat
            heartbeat = live(executable='rucio-judge-repairer', hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)

            start = time.time()

            # Refresh paused rules
            iter_paused_rules = deepcopy(paused_rules)
            for key in iter_paused_rules:
                if datetime.utcnow() > paused_rules[key]:
                    del paused_rules[key]

            # Select a bunch of rules for this worker to repair
            rules = get_stuck_rules(total_workers=heartbeat['nr_threads'] - 1,
                                    worker_number=heartbeat['assign_thread'],
                                    delta=-1 if once else 1800,
                                    limit=100,
                                    blacklisted_rules=[key for key in paused_rules])
            logging.debug('rule_repairer[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, time.time() - start, len(rules)))

            if not rules and not once:
                logging.debug('rule_repairer[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, str(len(paused_rules))))
                graceful_stop.wait(60)
            else:
                for rule_id in rules:
                    rule_id = rule_id[0]
                    logging.info('rule_repairer[%s/%s]: Repairing rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
                    if graceful_stop.is_set():
                        break
                    try:
                        start = time.time()
                        repair_rule(rule_id=rule_id)
                        logging.debug('rule_repairer[%s/%s]: repairing of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, time.time() - start))
                    except (DatabaseException, DatabaseError) as e:
                        if match('.*ORA-00054.*', str(e.args[0])):
                            # Row locked by another session: pause this rule for 10-40 minutes.
                            paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
                            logging.warning('rule_repairer[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
                            record_counter('rule.judge.exceptions.LocksDetected')
                        elif match('.*QueuePool.*', str(e.args[0])):
                            logging.warning(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                        elif match('.*ORA-03135.*', str(e.args[0])):
                            logging.warning(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                        else:
                            logging.error(traceback.format_exc())
                            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        except (DatabaseException, DatabaseError) as e:
            # Transient DB problems (pool exhaustion, lost Oracle connection)
            # are warnings; anything else at this level is critical.
            if match('.*QueuePool.*', str(e.args[0])):
                logging.warning(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
            elif match('.*ORA-03135.*', str(e.args[0])):
                logging.warning(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
            else:
                logging.critical(traceback.format_exc())
                record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        except Exception as e:
            logging.critical(traceback.format_exc())
            record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        if once:
            break

    die(executable='rucio-judge-repairer', hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
    """
    Graceful exit.

    The (signum, frame) signature matches the signal-handler convention so
    this can be registered with signal.signal as well as called directly.
    """
    graceful_stop.set()
def run(once=False, threads=1):
    """
    Starts up the Judge-Repairer threads.

    :param once: run a single repair pass in the calling thread and return.
    :param threads: number of worker threads to spawn otherwise.
    """
    hostname = socket.gethostname()
    sanity_check(executable='rucio-judge-repairer', hostname=hostname)

    if once:
        rule_repairer(once)
    else:
        logging.info('Repairer starting %s threads' % str(threads))
        # range() instead of xrange() keeps this working on Python 2 and 3.
        threads = [threading.Thread(target=rule_repairer, kwargs={'once': once}) for _ in range(0, threads)]
        for t in threads:
            t.start()
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            for t in threads:
                t.join(timeout=3.14)
|
main.py | # -*- coding: utf-8 -*-
import os
import argparse
import logging
#import threading
#import shutil
import datetime
import time
from multiprocessing import Process
import multiprocessing
from paramiko import SSHClient
import paramiko
# Ruta de archivos estaticos
from unipath import Path
# librerias propias
from lib_sysblack.lib_config import load_config
from lib_sysblack.lib_csv import parser_cvs
from lib_sysblack.lib_mail import send_mail
from lib_sysblack.lib_folder_incremental import folder_incremental
# Raiz del proyecto
PROJECT_DIR = Path(__file__).ancestor(1)
NAMEAPP = "forti_backup"
NAME_FILE_LOG = "%s.log" % (NAMEAPP)
NAME_FILE_LOG_PATH = PROJECT_DIR.child(NAME_FILE_LOG)
NAME_FILE_CONFIG = "%s.cfg" % (NAMEAPP)
NAME_FILE_CONFIG_PATH = PROJECT_DIR.child(NAME_FILE_CONFIG)
FILE_CSV = "%s.csv" % (NAMEAPP)
FILE_CSV_PATH = PROJECT_DIR.child(FILE_CSV)
NAME_FOLDER_BACKUP = "backup"
NAME_FOLDER_BACKUP_PATH = PROJECT_DIR.child(NAME_FOLDER_BACKUP)
#outlock = threading.Lock()
def loading_args():
    """Parse the command-line arguments for a backup run.

    Returns the argparse namespace with verbose/config/debug/test/csv options.
    (Help strings are user-facing runtime text and are kept in Spanish.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", help="Mostrar información en consola.", action="store_true")
    parser.add_argument("-c", "--config", help="Nombre de archivo de configuracion.", default=NAME_FILE_CONFIG_PATH)
    parser.add_argument("-d", "--debug", help="Mostrar información de depuración.", action="store_true")
    parser.add_argument("-t", "--test", help="Tirar una prueba del comando.", action="store_true")
    parser.add_argument("-csv", help="Nombre de archivo de configuracion.", default=FILE_CSV_PATH)
    args = parser.parse_args()
    return args
def log_configuration(args):
    """Configure logging: always to the log file, echoed to console on --verbose.

    --debug lowers the level from INFO to DEBUG.
    """
    level_log = logging.INFO
    if args.debug:
        level_log = logging.DEBUG
    logformat = "%(asctime)s %(levelname)s: %(message)s"
    logging.basicConfig(filename=NAME_FILE_LOG_PATH, filemode='w', format=logformat, level=level_log)
    if args.verbose:
        # Mirror log records to stderr with the same format.
        fh = logging.StreamHandler()
        logFormatter = logging.Formatter(logformat)
        fh.setFormatter(logFormatter)
        logging.getLogger().addHandler(fh)
def witter_file(name, data):
    """Write *data* to file *name*, overwriting any existing content.

    Uses a 'with' block so the file is closed even if the write fails
    (the original leaked the handle on error).
    """
    with open(name, "w") as file_config:
        file_config.write(data)
def read_file(filename):
    """Return the full text content of *filename*."""
    with open(filename) as handle:
        return handle.read()
def conect_fortigate(hostname, port, username, password):
    """Open an SSH connection to a Fortigate device and return the client.

    NOTE(review): AutoAddPolicy accepts unknown host keys automatically,
    which is convenient but exposed to MITM — confirm this is acceptable
    on this management network.
    """
    ssh = SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Password-only auth: agent and key lookup are explicitly disabled.
    ssh.connect(hostname, port=int(port), username=username, password=password, timeout=10, allow_agent=False, look_for_keys=False)
    return ssh
def exec_fortigate(ssh, comando):
    """Run *comando* over the given SSH client, close the connection,
    and return the command's stdout.
    """
    stdin, stdout, stderr = ssh.exec_command(comando)
    # Read all output before closing; the connection is single-use.
    outlines = stdout.read()
    ssh.close()
    return outlines
def worker(forti, numero_copias):
    """Back up one Fortigate device; runs in its own multiprocessing Process.

    Connects over SSH, dumps the full configuration, and hands it to
    controller_backup for writing and rotation. All failures are logged,
    never raised (one bad device must not abort the run).
    """
    logging.info('Process id {} - Start: {}. '.format(os.getpid(), forti["name"]))
    try:
        ssh = conect_fortigate(
            forti["host"],
            forti["port"],
            forti["user"],
            forti["pass"],
        )
        data = exec_fortigate(ssh, "show full-configuration")
        # Timestamp parts used to name this backup file.
        fecha = datetime.date.today()
        hora = time.strftime("%H-%M-%S")
        ruta_destino = NAME_FOLDER_BACKUP_PATH.child(forti["name"])
        name_file_backup_latest = "forti-%s-time-%s.txt" % (fecha, hora)
        path_name_temp = ruta_destino.child(name_file_backup_latest)
        controller_backup(forti["name"], NAME_FOLDER_BACKUP_PATH, path_name_temp, ruta_destino, numero_copias, data)
        logging.info('Process id {} - Copia de Seguridad Completada Correctamente en: {}. '.format(os.getpid(), forti["name"]))
    except Exception as e:
        logging.error("Process id {} - Error en la copia de seguridad: {} {}.".format(os.getpid(), e, forti["name"]))
def fun_send_mail(config, args, data_log=""):
    """Email *data_log* to every configured recipient when MAIL.enable == 'yes'.

    Reads all SMTP settings from the [MAIL] section; in --test mode the
    sending is logged but skipped. Per-recipient failures are logged and
    do not stop the remaining sends.
    """
    if config.get("MAIL", "enable") == "yes":
        send_from = config.get("MAIL", "send_from")
        username = config.get("MAIL", "username")
        password = config.get("MAIL", "password")
        send_to = config.get("MAIL", "send_to")
        files = config.get("MAIL", "files")
        # The literal string "no" means: no attachments.
        if files == "no":
            files = None
        server = config.get("MAIL", "server")
        port = config.get("MAIL", "port")
        tls = config.get("MAIL", "tls")
        subject = config.get("MAIL", "subject")
        for email in send_to.split(","):
            # In test mode, skip actually sending the email.
            if args.test:
                log = "Modo Test enable, se evito Mandar email a: {email}".format(email=email)
                logging.info(log)
                continue
            try:
                send_mail(
                    username,
                    password,
                    send_from,
                    email.strip(),
                    subject,
                    data_log,
                    files,
                    server,
                    port,
                    tls
                )
                logging.info('Email enviado correctamente.')
            except Exception as e:
                logging.error("Al Enviar email: {}.".format(e))
def folder_device(folder_backup, name_folder):
    """Ensure the per-device backup history folder exists.

    Uses os.makedirs(..., exist_ok=True) so concurrent worker processes
    (this script forks one Process per device) cannot race between the
    exists-check and the mkdir, which the original exists/mkdir pair could.
    """
    os.makedirs(folder_backup.child(name_folder), exist_ok=True)
def controller_backup(folder_name, backup_path, path_name_temp, ruta_destino, numero_copias, data):
    """Persist the latest config dump for a device and rotate old copies."""
    # Create the device's history folder if it does not exist yet.
    folder_device(backup_path, folder_name)
    # Strip CLI pager artifacts ("--More--") before persisting.
    witter_file(path_name_temp, data.replace("--More--", ""))
    # Keep at most numero_copias historical copies of the dump.
    folder_incremental(path_name_temp, ruta_destino, numero_copias, "forti")
def main():
    """Program entry point: load configuration and the CSV inventory,
    back up every device in a separate process, then mail the run log."""
    # Command-line arguments and logging setup.
    args = loading_args()
    log_configuration(args)
    logging.debug("Inicio de modo de depuracion.")
    # Load the .conf file and the CSV list of devices.
    config = load_config(args.config)
    list_fortigates = parser_cvs(args.csv, config.get("GENERAL", "fields_csv").split(","))
    numero_copias = config.get("GENERAL", "number_backup")
    procesos = []
    for forti in list_fortigates:
        # Test mode: only report what would have been backed up.
        if args.test:
            log = "Modo Test enable, se evito hacer el backup: {name} {host}:{port} {user} {password}".format(
                name=forti["name"],
                host=forti["host"],
                port=forti["port"],
                user=forti["user"],
                password=forti["pass"],
            )
            logging.info(log)
            continue
        proceso = Process(target=worker, args=(forti, numero_copias))
        proceso.start()
        procesos.append(proceso)
        # Small stagger so all devices are not hit at the same instant.
        time.sleep(0.5)
    if not args.test:
        # Wait for every backup process to finish before mailing the log.
        for proceso in procesos:
            proceso.join()
    file_log_read = read_file(NAME_FILE_LOG_PATH)
    fun_send_mail(config, args, file_log_read)
if __name__ == '__main__':
    # Required on Windows/frozen builds so child processes bootstrap correctly.
    multiprocessing.freeze_support()
    main()
|
email.py | from threading import Thread
from flask_mail import Message
from flask import render_template
from app import app, mail
def send_async_email(app, msg):
    """Send *msg* via Flask-Mail inside *app*'s application context.

    Intended to run on a background thread (see send_email).
    """
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a plain-text + HTML message and send it on a background
    thread so the caller is not blocked by SMTP."""
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    worker = Thread(target=send_async_email, args=(app, message))
    worker.start()
def send_password_reset_email(user):
    """Email *user* a password-reset link containing a signed token."""
    token = user.get_reset_password_token()
    # Fixed subject typo: "Reset You Password" -> "Reset Your Password".
    send_email('[Miroblog] Reset Your Password',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               # NOTE(review): the text and html templates have different base
               # names (reset_password.txt vs reset_password_request.html) --
               # confirm both exist and render equivalent content.
               text_body=render_template('email/reset_password.txt', user=user, token=token),
               html_body=render_template('email/reset_password_request.html', user=user, token=token))
|
qolsys_socket.py | import json
import socket
import ssl
import sys
import time
import asyncio
import threading
import appdaemon.plugins.mqtt.mqttapi as mqtt
#
# qolsys socket manager
#
# args
# yep
#
class qolsys:
    """Socket manager for a Qolsys IQ alarm panel.

    Connects over TLS, listens on a background thread, and forwards JSON
    payloads received from the panel to a caller-supplied callback.
    """
    ################################################################################
    # Code
    def __init__(self, app):
        """Store the AppDaemon app (used for logging) and placeholders.

        The real socket objects are created later in create_socket().
        """
        self._sock = socket.socket
        self._wrappedSocket = ssl.SSLContext.wrap_socket
        self._listening_thread = threading.Thread()
        self._listener_callback = callable
        self._hostname = ""
        self._port = 12345
        self._token = ""
        self._timeout = 60
        self.app = app  # AppDaemon app, provides .log()
        self.__listening__ = True
        # logging.basicConfig(filename='qolsys_socket.log', level=logging.DEBUG)

    def create_socket(self, hostname, port, token, cb: callable, timeout=60):
        """Create, TLS-wrap and connect the socket, then start the listener.

        Returns True on success, False if the connect fails; re-raises if
        the raw socket cannot be created at all.
        """
        # Remember the parameters so _reset_socket() can reconnect later.
        self._hostname = hostname
        self._port = port
        self._token = token
        self._listener_callback = cb
        self._timeout = timeout
        try:
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock.settimeout(timeout)
            #Set the listener callback at the instance level so we can restart the listener if needed
        except socket.error:
            self.app.log('Could not create a socket', level="ERROR")
            raise
        # Wrap SSL
        # NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12)
        # and CERT_NONE disables certificate verification -- presumably the
        # panel uses a self-signed certificate; confirm before upgrading.
        self.app.log("wrapping socket")
        self._wrappedSocket = ssl.wrap_socket(self._sock, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1_2)
        # Connect to server
        try:
            #The stupid Qolsys panel requires blocking
            # wrappedSocket.setblocking(False)
            self.app.log("connecting to socket", level="INFO")
            self._wrappedSocket.connect((hostname, port))
            self.app.log("Connected wrappedSocket: %s", self._wrappedSocket, level="INFO")
            self.app.log("Starting listener thread", level="INFO")
            self._start_listener()
            #self.listening_thread = threading.Thread(target=self.listen, args=([cb]))
            #self.listening_thread.start()
            self.app.log("started listener", level="INFO")
            return True
        except socket.error:
            self.app.log("Error creating or connecting to socket %s", sys.exc_info(), level="ERROR")
            return False

    def _start_listener(self):
        """Spawn the background thread that runs listen() with the callback."""
        self.app.log("Starting listener thread", level="INFO")
        self._listening_thread = threading.Thread(target=self.listen, args=([self._listener_callback]))
        self._listening_thread.start()
        self.app.log("started listener thread", level="INFO")

    def _reset_socket(self):
        """Tear the connection down and rebuild it with the saved parameters."""
        self.close_socket()
        #self._listening_thread = threading.Thread(target=self.listen, args=([self._listener_callback]))
        self.app.log("Creating socket", level="INFO")
        self.__listening__ = True
        self.create_socket(self._hostname, self._port, self._token, self._listener_callback, self._timeout)

    def close_socket(self):
        """Stop the listener loop and close both socket layers."""
        self.app.log("Detatching from wrapped socket", level="WARNING")
        self.__listening__ = False
        self._wrappedSocket.detach()
        self.app.log("Closing socket", level="WARNING")
        self._sock.close()
        # Give the listener thread a moment to notice the shutdown.
        time.sleep(1)

    def send_to_socket(self, message: json):
        """Send a newline, then the JSON-encoded *message*, to the panel."""
        self._wrappedSocket.send(b'\n')
        self._wrappedSocket.send((json.dumps(message)).encode())
        return True

    def listen(self, cb: callable):
        """Blocking receive loop; runs on the listener thread.

        JSON payloads are handed to *cb*; an empty read (likely a bad
        token) triggers a reconnect via NoDataError.
        """
        #listening = True
        self.app.log("starting listen", level="INFO")
        data = ""
        #err = ""
        # Spin until the SSL socket reports it is connected.
        # NOTE(review): _connected is a private ssl attribute -- may break on
        # future Python versions.
        while not (self._wrappedSocket._connected):
            self.app.log("not connected yet", level="WARNING")
            self.app.log(self._wrappedSocket._connected, level="INFO")
            time.sleep(1)
        try:
            while self._wrappedSocket._connected and self.__listening__:
                data = self._wrappedSocket.recv(4096).decode()
                if len(data) > 0:
                    self.app.log("data received from qolsys panel: %s len(data): %s", data, len(data), level="DEBUG")
                    if is_json(data):
                        try:
                            cb(data)
                        except:
                            # Callback errors are logged, not fatal to the loop.
                            self.app.log("Error calling callback: %s", cb, sys.exc_info(), level="ERROR")
                        #print(data)
                    else:
                        # Panel keep-alive ACKs and other non-JSON are ignored.
                        if data != 'ACK\n':
                            pass
                            #self.app.log(("non json data:", data))
                else:
                    # Zero-length read: the panel closed the connection.
                    self.app.log("No data received. Bad token? Detatching.", level="ERROR")
                    self._wrappedSocket.detach()
                    raise NoDataError
            self.app.log("stopped listening on qolsys socket", level="INFO")
        except socket.timeout:
            self.app.log("socket timeout", level="WARNING")
        except NoDataError:
            # Rebuild the socket, then re-raise so callers can observe it.
            self._reset_socket()
            raise NoDataError
        except:
            self.app.log("listen failed/stopped: %s", sys.exc_info(), level="ERROR")
def is_json(myjson):
    """Return True if *myjson* is a valid JSON document, else False.

    Bug fixed: the previous version only returned True when the parsed
    value was truthy, so valid payloads such as '{}', '0', 'false' or
    'null' were reported as non-JSON (it fell through returning None).
    It also used a bare except; only parse/type errors are caught now.
    """
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        return False
    return True
class NoDataError(Exception):
    """Raised when the panel socket returns no data (e.g. a rejected token)."""
    pass
test_initialize.py | import multiprocessing as mp
import numpy
import psutil
import pytest
from dask import array as da
from distributed import Client
from distributed.deploy.local import LocalCluster
from dask_cuda.initialize import initialize
from dask_cuda.utils import _ucx_110, _ucx_111, get_ucx_config
mp = mp.get_context("spawn") # type: ignore
ucp = pytest.importorskip("ucp")
# Notice, all of the following tests is executed in a new process such
# that UCX options of the different tests doesn't conflict.
# Furthermore, all tests do some computation to trigger initialization
# of UCX before retrieving the current config.
def _test_initialize_ucx_tcp():
    """Spawn-process body: initialize UCX with TCP and verify the config."""
    kwargs = {"enable_tcp_over_ucx": True}
    initialize(**kwargs)
    with LocalCluster(
        protocol="ucx",
        dashboard_address=None,
        n_workers=1,
        threads_per_worker=1,
        processes=True,
        config={"distributed.comm.ucx": get_ucx_config(**kwargs)},
    ) as cluster:
        with Client(cluster) as client:
            # Run a small computation to force UCX initialization.
            res = da.from_array(numpy.arange(10000), chunks=(1000,))
            res = res.sum().compute()
            assert res == 49995000

            def check_ucx_options():
                """Assert the active UCX config enables the TCP transports."""
                conf = ucp.get_config()
                assert "TLS" in conf
                assert "tcp" in conf["TLS"]
                assert "cuda_copy" in conf["TLS"]
                if _ucx_110:
                    assert "tcp" in conf["SOCKADDR_TLS_PRIORITY"]
                else:
                    # Older UCX uses the sockcm connection manager instead.
                    assert "sockcm" in conf["TLS"]
                    assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
                return True

            # Verify on the scheduler and on every worker.
            assert client.run_on_scheduler(check_ucx_options) is True
            assert all(client.run(check_ucx_options).values())
def test_initialize_ucx_tcp():
    """Run the TCP test body in a fresh spawn process (isolates UCX options)."""
    proc = mp.Process(target=_test_initialize_ucx_tcp)
    proc.start()
    proc.join()
    # exitcode 0 means the spawned body completed without assertion failures.
    assert not proc.exitcode
def _test_initialize_ucx_nvlink():
    """Spawn-process body: initialize UCX with NVLink and verify the config."""
    kwargs = {"enable_nvlink": True}
    initialize(**kwargs)
    with LocalCluster(
        protocol="ucx",
        dashboard_address=None,
        n_workers=1,
        threads_per_worker=1,
        processes=True,
        config={"distributed.comm.ucx": get_ucx_config(**kwargs)},
    ) as cluster:
        with Client(cluster) as client:
            # Run a small computation to force UCX initialization.
            res = da.from_array(numpy.arange(10000), chunks=(1000,))
            res = res.sum().compute()
            assert res == 49995000

            def check_ucx_options():
                """Assert the active UCX config enables cuda_ipc (NVLink)."""
                conf = ucp.get_config()
                assert "TLS" in conf
                assert "cuda_ipc" in conf["TLS"]
                assert "tcp" in conf["TLS"]
                assert "cuda_copy" in conf["TLS"]
                if _ucx_110:
                    assert "tcp" in conf["SOCKADDR_TLS_PRIORITY"]
                else:
                    # Older UCX uses the sockcm connection manager instead.
                    assert "sockcm" in conf["TLS"]
                    assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
                return True

            # Verify on the scheduler and on every worker.
            assert client.run_on_scheduler(check_ucx_options) is True
            assert all(client.run(check_ucx_options).values())
def test_initialize_ucx_nvlink():
    """Run the NVLink test body in a fresh spawn process (isolates UCX options)."""
    proc = mp.Process(target=_test_initialize_ucx_nvlink)
    proc.start()
    proc.join()
    # exitcode 0 means the spawned body completed without assertion failures.
    assert not proc.exitcode
def _test_initialize_ucx_infiniband():
    """Spawn-process body: initialize UCX with InfiniBand and verify the config."""
    kwargs = {"enable_infiniband": True}
    if not _ucx_110:
        # Pre-1.10 UCX needs the device pinned explicitly.
        kwargs["net_devices"] = "ib0"
    initialize(**kwargs)
    with LocalCluster(
        protocol="ucx",
        dashboard_address=None,
        n_workers=1,
        threads_per_worker=1,
        processes=True,
        config={"distributed.comm.ucx": get_ucx_config(**kwargs)},
    ) as cluster:
        with Client(cluster) as client:
            # Run a small computation to force UCX initialization.
            res = da.from_array(numpy.arange(10000), chunks=(1000,))
            res = res.sum().compute()
            assert res == 49995000

            def check_ucx_options():
                """Assert the active UCX config enables the rc (IB) transport."""
                conf = ucp.get_config()
                assert "TLS" in conf
                assert "rc" in conf["TLS"]
                assert "tcp" in conf["TLS"]
                assert "cuda_copy" in conf["TLS"]
                if _ucx_110:
                    assert "tcp" in conf["SOCKADDR_TLS_PRIORITY"]
                else:
                    # Older UCX uses the sockcm connection manager instead.
                    assert "sockcm" in conf["TLS"]
                    assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
                    assert conf["NET_DEVICES"] == "ib0"
                return True

            # Verify on the scheduler and on every worker.
            assert client.run_on_scheduler(check_ucx_options) is True
            assert all(client.run(check_ucx_options).values())
@pytest.mark.skipif(
    "ib0" not in psutil.net_if_addrs(), reason="Infiniband interface ib0 not found"
)
def test_initialize_ucx_infiniband():
    """Run the InfiniBand test body in a fresh spawn process."""
    proc = mp.Process(target=_test_initialize_ucx_infiniband)
    proc.start()
    proc.join()
    # exitcode 0 means the spawned body completed without assertion failures.
    assert not proc.exitcode
def _test_initialize_ucx_all():
    """Spawn-process body: initialize UCX with automatic (all) transports."""
    initialize()
    with LocalCluster(
        protocol="ucx",
        dashboard_address=None,
        n_workers=1,
        threads_per_worker=1,
        processes=True,
        config={"distributed.comm.ucx": get_ucx_config()},
    ) as cluster:
        with Client(cluster) as client:
            # Run a small computation to force UCX initialization.
            res = da.from_array(numpy.arange(10000), chunks=(1000,))
            res = res.sum().compute()
            assert res == 49995000

            def check_ucx_options():
                """Assert UCX was left in automatic transport selection."""
                conf = ucp.get_config()
                assert "TLS" in conf
                assert conf["TLS"] == "all"
                assert all(
                    [
                        p in conf["SOCKADDR_TLS_PRIORITY"]
                        for p in ["rdmacm", "tcp", "sockcm"]
                    ]
                )
                return True

            # Verify on the scheduler and on every worker.
            assert client.run_on_scheduler(check_ucx_options) is True
            assert all(client.run(check_ucx_options).values())
@pytest.mark.skipif(
    not _ucx_111, reason="Automatic configuration not supported in UCX < 1.11",
)
def test_initialize_ucx_all():
    """Run the automatic-transport test body in a fresh spawn process."""
    proc = mp.Process(target=_test_initialize_ucx_all)
    proc.start()
    proc.join()
    # exitcode 0 means the spawned body completed without assertion failures.
    assert not proc.exitcode
|
testrunner.py | __author__ = 'Steven Summers'
__version__ = ''
import argparse
# import builtins
import difflib
import importlib.util
import inspect
import io
import json
import re
import sys
import textwrap
import threading
import time
import traceback
import unittest
from bdb import Bdb
from collections import OrderedDict
from enum import Enum, unique
from functools import wraps
from types import FunctionType, ModuleType, TracebackType
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
# GLOBALS TO EXCLUDE FILES IN TRACEBACK
__TEST_RUNNER = True
setattr(threading, '__TEST_RUNNER', True)  # Don't like this but otherwise regex

__all__ = ['AttributeGuesser', 'OrderedTestCase', 'RedirectStdIO', 'TestCase', 'TestMaster',
           'skipIfFailed', 'timeout']

# DEFAULTS
DEFAULT_TIMEOUT = 0
# MIN_PY_VERSION = (3, 7, 0)

# CONSTANTS
# Message appended when a diff exceeds TestCase.maxDiff.
DIFF_OMITTED = '\nDiff is {} characters long. Set TestMaster(max_diff=None) to see it.'
DUPLICATE_MSG = 'AS ABOVE'
# difflib cutoff used by AttributeGuesser when fuzzy-matching attribute names.
CLOSE_MATCH_CUTOFF = 0.8
TAB_SIZE = 4
BLOCK_WIDTH = 80
# ASCII box used for section banners in the report output.
BLOCK_TEMPLATE = """\
/{0}\\
|{{:^{1}}}|
\\{0}/\
""".format('-' * (BLOCK_WIDTH - 2), BLOCK_WIDTH - 2)
@unique
class TestOutcome(Enum):
    """Single-character markers used when reporting each test's result."""
    SUCCESS = '+'
    FAIL = '-'
    SKIP = '?'
def skipIfFailed(test_case: Type[unittest.TestCase] = None, test_name: str = None, tag: str = None):
    """
    skipIfFail decorator allows you to skip entire TestCases or specific test
    cases if not all tests pass for a TestCase, or if a specific test case fails
    (skipped counts as a fail).

    At least one test method of TestCase1 needs to fail to skip
        @skipIfFailed(TestCase1)

    Skip if 'test_method' of TestCase1 failed
        @skipIfFailed(TestCase1, 'test_method')

    Skip if 'test_method' failed
    Can only be applied to method with class class containing a method
    named 'test_method'
        @skipIfFailed(test_name='test_method')
    """
    # Validate the argument combination up front so mistakes fail loudly
    # at decoration time, not at run time.
    if test_case is None and test_name is None:
        raise RuntimeError("test_case and test_name for skipIfFailed can't both be None")
    if test_case is not None and test_name is not None and not hasattr(test_case, test_name):
        raise AttributeError(f'{test_case.__name__} has no method {test_name}')
    if tag is not None and test_name is None:
        raise RuntimeError("test_name must be specified if tag is provided for skipIfFailed")

    def decorator(obj: Union[Type[TestCase], Callable]):
        # Accumulate (test_case, test_name, tag) triples on the decorated
        # object; copy first so a subclass does not mutate its parent's list.
        if hasattr(obj, '__skip_test__'):
            obj.__skip_test__ = obj.__skip_test__.copy()
            obj.__skip_test__.append((test_case, test_name, tag))
        else:
            obj.__skip_test__ = [(test_case, test_name, tag)]
        # Classes are returned as-is; functions get a pass-through wrapper.
        if not inspect.isfunction(obj):
            return obj

        @wraps(obj)
        def wrapper(*args, **kwargs):
            return obj(*args, **kwargs)
        return wrapper
    return decorator
def import_module(name: str, path: str) -> Tuple[Optional[ModuleType], Optional[Tuple[Type, Exception, TracebackType]]]:
    """
    Dynamically import the Python file (.py) at 'path' the
    __name__ attribute will be set to 'name'

    Returns (module, None) on success, or (None, exc_info) if executing
    the module raised.  stdin/stdout are captured during execution and a
    __TEST_RUNNER_CLEAN_IMPORT flag records whether the import printed.
    """
    if not name:
        raise ValueError("'name' can not be empty")
    spec = importlib.util.spec_from_file_location(name, path)
    if spec is None:
        raise ValueError(f'The path {path} is invalid. It should be a Python (.py) file path.')
    module = importlib.util.module_from_spec(spec)
    with RedirectStdIO(stdin=True, stdout=True) as stdio:
        try:
            spec.loader.exec_module(module)
            # A "clean" import wrote nothing to stdout at import time.
            setattr(module, '__TEST_RUNNER_CLEAN_IMPORT', stdio.stdout == '')
            return module, None
        except BaseException:
            return None, sys.exc_info()
def _timeout_wrapper(test_func):
    """
    Runs the test function in a killable thread, the seconds value
    is obtained from the __timeout__ attribute which can be set globally
    using TestMaster(timeout=value) or apply to specific classes or functions
    using the timeout decorator, if seconds <= 0 the test is not threaded.
    """
    @wraps(test_func)
    def thread_wrapper(self):
        # Per-method timeout wins over per-class, which wins over the global.
        secs = getattr(test_func, '__timeout__', 0) or \
            getattr(self.__class__, '__timeout__', 0) or \
            _TimeoutThread.timeout
        if secs <= 0:
            return test_func(self)
        try:
            thread = _TimeoutThread(name=test_func.__qualname__,
                                    target=test_func, args=(self,))
            threading.settrace(thread.global_trace)
            thread.start()
            thread.join(secs)
            # Fixed: Thread.isAlive() was removed in Python 3.9; the
            # canonical spelling is is_alive().
            alive = thread.is_alive()
            thread.kill()
            # re-join to ensure thread completes any blocking operations. This is
            # really only required because long blocking calls may result
            # in sequential tests using RedirectStdIO not setting back correctly
            thread.join()
        finally:
            threading.settrace(None)
        if alive:
            raise unittest.SkipTest(f'Function ran longer than {secs} second(s)')
        if thread.exc_info is not None:
            # Re-raise the exception captured inside the worker thread.
            raise thread.exc_info[1].with_traceback(thread.exc_info[2])
        return None
    return thread_wrapper
def timeout(seconds: float = 0):
    """
    Decorator to apply __timeout__ attribute to a test method or TestCase
    (read later by _timeout_wrapper).
    """
    def _apply(test_obj):
        test_obj.__timeout__ = seconds
        return test_obj
    return _apply
def get_object_name(obj):
return getattr(obj, '__qualname__', None) or getattr(obj, '__name__', None) or obj.__class__.__name__
class CachedIO(io.StringIO):
    """StringIO that mirrors everything read from or written through it
    onto a secondary stream (used to build a combined stdin/stdout
    transcript)."""
    def __init__(self, stream):
        super().__init__()
        self._stream = stream

    def set_value(self, string):
        """Replace the buffer contents without echoing to the mirror stream."""
        self.seek(0)
        self.truncate()
        super().write(string)
        self.seek(0)

    def write(self, s: str):
        count = super().write(s)
        self._stream.write(s)
        return count

    def readline(self, size: int = None):
        line = super().readline(size)
        self._stream.write(line)
        return line
class RedirectStdIO:
    """
    Context manager to send stdin input and capture stdout and stderr

    Usage:
        with RedirectStdIO(stdin=True, stdout=True) as stdio:
            stdio.set_stdin('World!\n')
            inp = input('Hello')

        stdio.stdout == 'Hello'
        inp == 'World'
    """
    def __init__(self, *, stdin: bool = False, stdout: bool = False,
                 stderr: bool = False, stdinout: bool = False):
        # Keep references to the real streams so __exit__ can restore them.
        self._sys_stdin = sys.stdin
        self._sys_stdout = sys.stdout
        self._sys_stderr = sys.stderr
        if stdinout:
            # Combined transcript: stdin and stdout both mirror into one
            # shared stream via CachedIO.
            self._stdinout_stream = io.StringIO()
            self._stdin_stream = CachedIO(self._stdinout_stream)
            self._stdout_stream = CachedIO(self._stdinout_stream)
        else:
            self._stdinout_stream = None
            self._stdin_stream = io.StringIO() if stdin else None
            self._stdout_stream = io.StringIO() if stdout else None
        self._stderr_stream = io.StringIO() if stderr else None

    def __enter__(self):
        # Swap in only the streams that were enabled in __init__.
        if self._stdin_stream is not None:
            sys.stdin = self._stdin_stream
        if self._stdout_stream is not None:
            sys.stdout = self._stdout_stream
        if self._stderr_stream is not None:
            sys.stderr = self._stderr_stream
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore the original streams, even on error.
        sys.stdin = self._sys_stdin
        sys.stdout = self._sys_stdout
        sys.stderr = self._sys_stderr

    @staticmethod
    def _read_stream(stream: io.StringIO) -> str:
        # Shared guard for the read-only properties below.
        if stream is None:
            raise RuntimeError(
                'Attempt to read from a stream that has not been enabled')
        return stream.getvalue()

    def set_stdin(self, string: str):
        """Replace any pending stdin input with *string*."""
        if self._stdin_stream is None:
            raise RuntimeError(
                f'stdin has not been set in {self.__class__.__name__}.__init__')
        if self._stdinout_stream is None:
            self._stdin_stream.seek(0)
            self._stdin_stream.truncate()
            self._stdin_stream.write(string)
            self._stdin_stream.seek(0)
        else:
            # CachedIO.set_value avoids echoing into the combined stream.
            self._stdin_stream.set_value(string)

    @property
    def stdin(self):
        """Unconsumed stdin input (the read position is preserved)."""
        if self._stdin_stream is None:
            raise RuntimeError(
                f'stdin has not been set in {self.__class__.__name__}.__init__')
        pos = self._stdin_stream.tell()
        value = self._stdin_stream.read()
        self._stdin_stream.seek(pos)
        return value

    @property
    def stdout(self) -> str:
        return self._read_stream(self._stdout_stream)

    @property
    def stderr(self) -> str:
        return self._read_stream(self._stderr_stream)

    @property
    def stdinout(self):
        return self._read_stream(self._stdinout_stream)
class RecursionDetector(Bdb):
    """Bdb-based tracer that raises RecursionError when any code object
    appears twice on the call stack (used by assertIsNotRecursive)."""
    def __init__(self, *args):
        super().__init__(*args)
        self._stack = set()  # code objects currently on the call stack

    def do_clear(self, arg):
        pass

    def user_call(self, frame, argument_list):
        code = frame.f_code
        # Re-entering a code object already on the stack means recursion.
        if code in self._stack:
            raise RecursionError
        self._stack.add(code)

    def user_return(self, frame, return_value):
        self._stack.remove(frame.f_code)
class AttributeGuesser:
    """
    Wrapper class for objects to return the attribute with the
    closest matching name. If fail is True then a TestCase.failureException
    is raised if no possible match is found.
    """
    def __init__(self, obj: Any, fail: bool = True):
        """
        Parameters:
            obj: Object to wrap for guessing attributes of
            fail: if attribute can't be found
                raise exception iff True otherwise return None
        """
        # Unwrap if already wrapped so guessers never nest.
        if isinstance(obj, AttributeGuesser):
            obj = getattr(obj, '_AttributeGuesser__object')
        self.__object = obj
        self.__cache = {}
        self.__fail = fail

    @classmethod
    def get_wrapped_object(cls, attr_guesser):
        """Return the original object hidden inside *attr_guesser*."""
        if not isinstance(attr_guesser, AttributeGuesser):
            raise ValueError('attr_guesser must be an instance of AttributeGuesser')
        return object.__getattribute__(attr_guesser, '_AttributeGuesser__object')

    def __guess_attribute(self, obj: Any, name: str):
        # difflib picks the closest attribute name above CLOSE_MATCH_CUTOFF.
        attributes = dict(inspect.getmembers(obj))
        matches = difflib.get_close_matches(name, attributes, n=1, cutoff=CLOSE_MATCH_CUTOFF)
        if not matches:
            if self._AttributeGuesser__fail:
                raise AttributeError(
                    f"Found no close match for '{get_object_name(obj)}.{name}'")
            return None
        return attributes[matches[0]]

    def __getattribute__(self, key: str):
        # The wrapper's own (name-mangled) state is served from self;
        # everything else is first tried directly on the wrapped object.
        if key in ('_AttributeGuesser__object', '_AttributeGuesser__cache',
                   '_AttributeGuesser__guess_attribute', '_AttributeGuesser__fail'):
            return object.__getattribute__(self, key)
        return getattr(object.__getattribute__(self, '_AttributeGuesser__object'), key)

    def __getattr__(self, key: str):
        # Only reached when the wrapped object lacks *key*: guess and cache.
        cache = self._AttributeGuesser__cache
        if key in cache:
            return cache[key]
        attr = self._AttributeGuesser__guess_attribute(self._AttributeGuesser__object, key)
        cache[key] = attr
        return attr

    def __setattr__(self, key: str, value: Any):
        # Wrapper state is stored on self; all other writes pass through.
        if key in ('_AttributeGuesser__object', '_AttributeGuesser__cache',
                   '_AttributeGuesser__fail'):
            return object.__setattr__(self, key, value)
        return setattr(self._AttributeGuesser__object, key, value)

    def __repr__(self):
        return f'AttributeGuesser({self._AttributeGuesser__object!r})'
class _TimeoutThread(threading.Thread):
    """
    Killable thread
    """
    # Global default timeout; overridden via TestMaster(timeout=...).
    timeout: float = DEFAULT_TIMEOUT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.killed = False   # set by kill(); checked in local_trace
        self.exc_info = None  # populated if the target raises

    def run(self):
        """
        Set the trace function and run the thread catching and storing
        any exceptions that occur.
        """
        try:
            super().run()
        except BaseException:
            self.exc_info = sys.exc_info()

    def kill(self):
        """ Set the thread to terminate at the next trace event """
        self.killed = True

    def global_trace(self, _frame, event, _arg):
        """
        Global trace function for threading.settrace which returns a local
        trace function
        """
        if event == 'call':
            return self.local_trace
        return None

    def local_trace(self, _frame, event, _arg):
        """
        Local trace function which kills the thread should it still be running
        and the 'killed' attribute is set to True.
        """
        if self.killed:
            if event == 'line':
                # SystemExit unwinds the thread at the next executed line.
                raise SystemExit
        return self.local_trace
class TestLoader(unittest.TestLoader):
    """ Custom loader class to specify TestCase case order """
    def getTestCaseNames(self, testCaseClass: Type['TestCase']):
        """
        Override for unittest.TestLoad.getTestCaseNames
        Return a sorted sequence of method names found within testCaseClass
        """
        # OrderedTestCase subclasses keep definition order via member_names.
        if issubclass(testCaseClass, OrderedTestCase):
            return testCaseClass.member_names
        return super().getTestCaseNames(testCaseClass)

    def loadTestCases(self, test_cases: List) -> unittest.TestSuite:
        """
        Build one suite from a mix of TestCase instances and classes.

        Params:
            test_cases List[Union[unittest.TestCase, Type[unittest.TestCase]]]
        """
        suite = unittest.TestSuite()
        for test_case in test_cases:
            if isinstance(test_case, unittest.TestCase):
                suite.addTest(test_case)
            else:
                suite.addTests(self.loadTestsFromTestCase(test_case))
        return suite
class _TestCaseMeta(type):
    """
    MetaClass to decorate all test methods with _timeout_wrapper and
    track test method definition order.
    """
    def __new__(mcs, name, bases, namespace):
        member_names = []
        prefix = TestLoader.testMethodPrefix
        # Record test methods in definition order and wrap each with the
        # timeout machinery.
        for key, value in namespace.items():
            if key.startswith(prefix) and callable(value):
                member_names.append(key)
                namespace[key] = _timeout_wrapper(value)
        result = super().__new__(mcs, name, bases, namespace)
        result.member_names = member_names
        return result

    # def __getattr__(cls, item):
    #     if item not in cls._modules:
    #         raise AttributeError(f"type object '{cls.__name__}'' has no attribute '{item}'")
    #     return cls._modules[item]
class TestCase(unittest.TestCase, metaclass=_TestCaseMeta):
    """
    Extends the unittest.TestCase defining additional assert methods.
    """
    # Test method names in definition order (populated by _TestCaseMeta).
    member_names: List[str]
    # Modules registered via register_module, exposed through __getattr__.
    _modules: Dict[str, ModuleType] = {}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.aggregated_tests = []  # failures collected by self.aggregate

    def __getattr__(self, item):
        # Unknown attributes fall back to the registered modules.
        if item not in self._modules:
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")
        return self._modules[item]

    @classmethod
    def register_module(cls, name: str, module: ModuleType):
        """Expose *module* as an attribute named *name* on all TestCases."""
        cls._modules[name] = module

    def assertIsCleanImport(self, module, msg=None):
        """Assert importing *module* produced no stdout output."""
        self.assertIs(getattr(module, '__TEST_RUNNER_CLEAN_IMPORT'), True, msg=msg)

    def assertMultiLineEqual(self, first: str, second: str, msg: Optional[str] = None, strip: bool = False):
        """
        unittest.TestCase.assertMultiLineEqual with strip keyword arg,
        if True then string is split on newlines with trailing
        whitespace striped and rejoined before
        """
        if strip:
            first = '\n'.join(s.rstrip() for s in first.splitlines()) + '\n'
            second = '\n'.join(s.rstrip() for s in second.splitlines()) + '\n'
        super().assertMultiLineEqual(first, second, msg=msg)

    def assertDefined(self, obj: Union[ModuleType, Type], name: str):
        """Assert *obj* defines an attribute called *name*."""
        if obj is None:
            self.fail(msg=f"Got 'None' when checking if '{name}' was defined for a type")
        obj_name = get_object_name(obj)
        if not hasattr(obj, name):
            self.fail(msg=f"'{obj_name}.{name}' is not defined correctly or not implemented")

    def assertFunctionDefined(self, obj: Union[ModuleType, Type], function_name: str, params: int):
        """Assert *function_name* is a function on *obj* taking *params* parameters."""
        self.assertDefined(obj, function_name)
        obj_name = get_object_name(obj)
        func = getattr(obj, function_name)
        if not inspect.isfunction(func):
            # A method descriptor means the stub was never overridden.
            if inspect.ismethoddescriptor(func):
                self.fail(msg=f"{obj_name}.{function_name} needs to be implemented")
            self.fail(msg=f"{obj_name}.{function_name} should be a function")
        num_params = len(inspect.signature(func).parameters)
        self.assertEqual(num_params, params,
                         msg=(f"'{function_name}' does not have the correct number of parameters, "
                              f"expected {params} found {num_params}"))

    def assertClassDefined(self, module: ModuleType, class_name: str):
        """Assert *class_name* exists in *module* and is a class."""
        self.assertDefined(module, class_name)
        class_ = getattr(module, class_name)
        self.assertIs(inspect.isclass(class_), True, msg=f"{class_name} should be a class")

    def assertIsSubclass(self, sub_class: Type, parent_class: Type):
        self.assertIs(issubclass(sub_class, parent_class), True,
                      msg=f"'{sub_class}' is not a subclass of '{parent_class}'")

    def assertDocString(self, obj: Union[Type, Callable], name: str = None):
        """Assert *obj* (or its attribute *name*) defines its own docstring."""
        if name is not None:
            # self.assertDefined(obj, name)
            obj = getattr(obj, name)
        if obj is None:
            self.fail(msg=f"Got 'None' when checking if docstring was defined for a type")
        # used over inspect.getdoc to require a doc string rather than inheriting it
        doc = getattr(obj, '__doc__', None)
        if doc is None or doc.strip() == '':
            self.fail(msg=f"Documentation string is required for '{obj.__qualname__}'")

    def assertListSimilar(self, actual: List, expected: List):
        """Assert both lists contain the same elements, ignoring order."""
        # Try if sortable
        # try:
        #     s1 = sorted(actual)
        #     s2 = sorted(expected)
        #     self.assertListEqual(s1, s2)
        #     return
        # except TypeError:
        #     pass
        # Fallback: remove each expected element once; leftovers are
        # unexpected, failures to remove are missing.
        unexpected = list(actual)
        missing = []
        for elem in expected:
            try:
                unexpected.remove(elem)
            except ValueError:
                missing.append(elem)
        if unexpected or missing:
            msg = f'Lists are not similar\n\nActual: {actual}\nExpected: {expected}'
            if missing:
                msg += f"\nMissing: {missing}"
            if unexpected:
                msg += f"\nUnexpected: {unexpected}"
            self.fail(msg=msg)

    def assertIsNotRecursive(self, func):
        """Assert calling *func* never re-enters any function (no recursion)."""
        detector = RecursionDetector()
        detector.set_trace()
        is_recursive = False
        try:
            func()
        except RecursionError:
            is_recursive = True
        finally:
            # Always remove the trace hook installed by the detector.
            sys.settrace(None)
        if is_recursive:
            self.fail(msg="function should not be recursive")

    def aggregate(self, test_func: Callable, *args, tag: str = None, **kwargs):
        """Run *test_func*, recording (instead of raising) any failure/skip."""
        try:
            test_func(*args, **kwargs)
        except (self.failureException, unittest.SkipTest) as failure:
            self.aggregated_tests.append((failure, tag))

    def aggregate_tests(self):
        """
        Must be called when done with the AggregateTestCase to propagate
        the failures. This is not in __exit__ due to hiding relevant traceback
        levels the exception message ends up pointing to the last line.
        """
        msg = ''
        for error, tag, in self.aggregated_tests:
            msg += '\n' + textwrap.indent(str(error), ' ' * TAB_SIZE) + \
                (f' :: {tag}' if tag is not None else '')
        if msg:
            self.fail(msg=msg)

    def _truncateMessage(self, message, diff):
        """
        override unittest.TestCase._truncateMessage to use DIFF_OMITTED message
        """
        max_diff = self.maxDiff
        if max_diff is None or len(diff) <= max_diff:
            return message + diff
        return message + DIFF_OMITTED.format(len(diff))

    @property
    def name(self) -> str:
        """The current test method's name."""
        return self._testMethodName

    @property
    def description(self) -> str:
        """Short docstring description if present, else the method name."""
        short_desc = self.shortDescription()
        return short_desc if short_desc else self.name
class OrderedTestCase(TestCase):
    """ TestCase with the description property reflecting the test number """
    @property
    def description(self):
        position = self.member_names.index(self.name) + 1
        return f'{position}. {super().description}'
class TestResult(unittest.TestResult):
"""
TestResult stores the result of each test in the order they were executed
"""
def __init__(self, stream=None, descriptions=None, verbosity=None):
super().__init__(stream, descriptions, verbosity)
self._start = 0
self._stop = 0
# TestCaseClassName TestCaseName
self.results: Dict[str, Dict[str, Tuple[TestCase, TestOutcome]]] = OrderedDict()
def startTestRun(self):
self._start = time.time()
super().startTestRun()
def stopTestRun(self):
self._stop = time.time()
super().stopTestRun()
@property
def run_time(self):
return self._stop - self._start
def startTest(self, test: TestCase):
test_cls_name = test.__class__.__name__
if test_cls_name not in self.results:
self.results[test_cls_name] = OrderedDict()
test_method = getattr(test.__class__, test.name)
self._apply_skip(test, test.__class__)
self._apply_skip(test, test_method)
super().startTest(test)
def _apply_skip(self, test: TestCase, test_item: Union[Type[TestCase], FunctionType]):
"""
Applies the unittest attributes used for skipping tests if the
__skip_test__ attribute has been applied to either the test class or
method using the skipIfFailed decorator.
"""
skip_test = getattr(test_item, '__skip_test__', None)
if skip_test is None:
return
for test_cls, test_name, tag in skip_test:
if test_cls is None: # if none then decorator was applied to current TestCase
# Set type of current TestCase and check if test method is defined
test_cls = test.__class__
if not hasattr(test_cls, test_name):
raise AttributeError(f'{test_cls.__name__} has no method {test_name}')
test_cls_name = test_cls.__name__
# Check if TestCase has been run
test_results = self.results.get(test_cls_name)
if test_results is None:
raise RuntimeError(
f"Can't check to skip {test.__class__.__name__}.{test.name} if {test_cls_name} has not run")
# Check if test for TestCase has been run
if test_name is not None and test_name not in test_results:
raise RuntimeError(f"Can't check to skip {test.__class__.__name__}.{test.name} '"
f"if {test_cls_name}.{test_name} has not run")
if test_name is not None:
test_case, outcome = test_results[test_name]
if outcome != TestOutcome.SUCCESS and \
(tag is None or (tag is not None and any(t == tag for _, t in test_case.aggregated_tests))):
# set attributes unittest looks for if a test is marked to skip
test_item.__unittest_skip__ = True
tag_msg = f" with tag '{tag}'" if tag is not None else ''
test_item.__unittest_skip_why__ = f'Skipped due to failing/skipping {test_cls_name}.{test_name}{tag_msg}'
break
elif test_name is None and any(outcome != TestOutcome.SUCCESS for _, outcome in test_results.values()):
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = f'Skipped due to failing/skipping a test from {test_cls_name}'
break
# set custom attribute to None since __unittest_skip__ has been applied
test_item.__skip_test = None
def addSubTest(self, test, subtest, err):
raise NotImplementedError("TODO")
def add_outcome(self, test: TestCase, outcome: TestOutcome):
self.results[test.__class__.__name__][test.name] = (test, outcome)
def addSuccess(self, test: TestCase):
self.add_outcome(test, TestOutcome.SUCCESS)
super().addSuccess(test)
@unittest.result.failfast
def addFailure(self, test: TestCase, err: Tuple[Type[BaseException], BaseException, TracebackType]):
self.add_outcome(test, TestOutcome.FAIL)
super().addFailure(test, err)
@unittest.result.failfast
def addError(self, test: TestCase, err: Tuple[Type[Exception], BaseException, TracebackType]):
self.add_outcome(test, TestOutcome.FAIL)
super().addError(test, err)
def addSkip(self, test: TestCase, reason: str):
self.add_outcome(test, TestOutcome.SKIP)
super().addSkip(test, reason)
def _is_relevant_tb_level(self, tb):
"""
Override which is used with unittest.TestResult._exc_info_to_string to
determine what levels of a traceback to skip when formatting the error.
"""
return '__TEST_RUNNER' in tb.tb_frame.f_globals or super()._is_relevant_tb_level(tb)
def to_dict(self):
    """Summarise all recorded results as {class_name: {test_name: outcome}}.

    Only the outcome's serialisable ``value`` is kept; the test objects
    themselves are dropped.
    """
    summary = {}
    for class_name, class_results in self.results.items():
        summary[class_name] = {
            test_name: outcome.value
            for test_name, (_test, outcome) in class_results.items()
        }
    return summary
class TestNoPrint(TestCase):
    """Synthetic test case that fails if anything was written to the
    captured stdout buffer during the test run."""

    def __init__(self, stdio: RedirectStdIO):
        super().__init__()
        # redirector whose captured stdout buffer runTest inspects
        self._stdio = stdio

    def runTest(self):
        """ check for no unexpected prints """
        self.assertEqual(self._stdio.stdout, '')
class TestMaster:
    """
    Core driving class which creates the TestSuite from the provided TestCases
    """
    # Visual separators and indent used for the plain-text report.
    separator1 = '=' * BLOCK_WIDTH
    separator2 = '-' * BLOCK_WIDTH
    indent = ' ' * TAB_SIZE

    # Strips directory components out of 'File "..."' traceback lines,
    # keeping only the .py file name.
    _remove_path = re.compile(r'File ".*[\\/]([^\\/]+.py)"')
    # _remove_threading = re.compile(
    #     r'(^\s*File \".*threading.py\".+?(?=\s*File \"))', flags=re.DOTALL | re.MULTILINE)
    # Removes importlib bootstrap frames from import-failure tracebacks.
    _remove_importlib = re.compile(
        r'(^\s*File \".*importlib.*\".+?(?=\s{2}File \"))', flags=re.DOTALL | re.MULTILINE)

    def __init__(self,
                 max_diff: int = None,
                 suppress_stdout: bool = True,
                 timeout: float = DEFAULT_TIMEOUT,
                 output_json: bool = False,
                 hide_paths: bool = True,
                 ignore_import_fails: bool = False,
                 include_no_print: bool = False,
                 scripts: List[Tuple[str, str]] = ()):
        """
        Parameters:
            max_diff: Determines the maximum length of diffs output by assert
                methods that report diffs on failure. Set to None for no max
            suppress_stdout: If True all uncaught stdout output is suppressed
            timeout: global timeout value in seconds, if a timeout > 0 is
                specified then the tests are run in killable threads.
            output_json: outputs text summary if True else in json format.
            hide_paths: if True file paths in traceback messages for failures
                are removed to only contain the filename.
            ignore_import_fails: If False (the default), no tests run when any
                module imported via 'scripts' fails to import correctly.
                If True, the failure is recorded and the tests run anyway.
            include_no_print: iff True adds a test for uncaught prints during
                tests. Requires suppress_stdout to be set as well.
            scripts: list of tuples, these tuples are a pair of module name and
                module path that gets imported using 'path' with the __name__
                attribute of the module set to 'name'. On successful import a
                __TEST_RUNNER_CLEAN_IMPORT attribute is set on the module True
                if nothing was output to stdout otherwise False.
        """
        # argparse setup — every constructor default can be overridden from
        # the command line.
        parser = argparse.ArgumentParser()
        parser.add_argument("-j", "--json",
                            help="Whether or not to display output in JSON format.",
                            action='store_true',
                            default=output_json)
        parser.add_argument("-d", "--diff",
                            help="The maximum number of characters in a diff",
                            action="store",
                            default=max_diff,
                            type=int)
        parser.add_argument("-t", "--timeout",
                            help="The maximum time a test is allowed to run before being killed",
                            action="store",
                            default=timeout,
                            type=float)
        parser.add_argument('-p', '--paths', nargs="+")
        parser.add_argument('-s', '--scripts', nargs="+")
        parser.add_argument("--hide-tb-paths",
                            help="Hide paths from traceback output.",
                            action="store_true",
                            default=hide_paths)
        # NOTE(review): the help text reads inverted — this flag causes full
        # duplicate tracebacks to be SHOWN instead of collapsed; confirm and
        # reword the help string.
        parser.add_argument("--show-tb-duplicates",
                            help="Remove duplicates from test output.",
                            action="store_true",
                            default=False)
        parser.add_argument("--ignore-import-fails",
                            help="Continue tests even if an import fails",
                            action="store_true",
                            default=ignore_import_fails)
        parser.add_argument("--include-no-print",
                            help="Adds test case for unexpected prints in functions",
                            action="store_true",
                            default=include_no_print)
        parser.add_argument("--suppress-stdout",
                            help="Suppresses uncaught stdout output while running tests",
                            action="store_true",
                            default=suppress_stdout)

        self._args = args = parser.parse_args()

        # Propagate the effective settings to the test machinery.
        TestCase.maxDiff = args.diff
        _TimeoutThread.timeout = args.timeout

        # Command-line scripts/paths override the constructor's 'scripts'.
        if args.scripts or args.paths:
            if len(args.scripts or ()) != len(args.paths or ()):
                parser.error("must have equal number of values for 'imports' and 'paths'")
            scripts = zip(args.scripts, args.paths)

        self.result = None
        self._import_errors = []
        # import scripts
        for name, path in scripts:
            name = name.strip()
            module, error = import_module(name, path)
            if module is not None:
                # Wrap so attribute typos produce helpful guesses.
                module: ModuleType = AttributeGuesser(module)
                TestCase.register_module(name, module)
            if error:
                self._import_errors.append(self.format_error(name, error))
                # Stop importing at the first failure unless told otherwise.
                if not args.ignore_import_fails:
                    break

    @staticmethod
    def _add_flavour(flavour: str, test_results: List[Tuple[TestCase, str]]):
        """Tag each (test, message) pair with a flavour such as 'FAIL'."""
        return [(flavour, test, msg) for test, msg in test_results]

    def print_results(self, failed_tests: List[Tuple[str, TestCase, str]], result: TestResult):
        """Print the text report: summary, failed imports, then failure detail."""
        # print summary
        print(BLOCK_TEMPLATE.format('Summary of Results'))
        for test_cls, test_cases in result.results.items():
            print(test_cls)
            for _test_name, (test, outcome) in test_cases.items():
                print(f'{self.indent}{outcome.value} {test.description}')

        # failed imports
        if self._import_errors:
            print(self.separator2)
            print(BLOCK_TEMPLATE.format('Failed Imports'))
            for err_type, _, err_msg in self._import_errors:
                print(self.separator1)
                print(f'REASON: {err_type.upper()}')
                print(self.separator2)
                print(textwrap.indent(err_msg, self.indent))

        # print fails
        if failed_tests:
            print(self.separator2)
            print(BLOCK_TEMPLATE.format('Failed/Skipped Tests'))
            prev = None
            for flavour, test, msg in failed_tests:
                if self._args.show_tb_duplicates:
                    self.print_error(flavour, test, msg.strip())
                else:
                    # Collapse consecutive identical tracebacks into a stub.
                    self.print_error(flavour, test, DUPLICATE_MSG if msg == prev else msg.strip())
                prev = msg

    def print_error(self, flavour: str, test: TestCase, msg: str):
        """Print one failure block (header plus indented traceback)."""
        print(self.separator1)
        print(f'{flavour}: {test.__class__.__name__} {test.description}')
        print(self.separator2)
        if self._args.hide_tb_paths:
            msg = self._remove_path.sub(r'File "\1"', msg)
        # msg = self._remove_threading.sub('', msg)
        print(textwrap.indent(msg, self.indent))
        print()

    def format_error(self, name: str, exc_info) -> Tuple[str, str, str]:
        """Classify an import-time exception.

        Returns (err_type, short_message, formatted_traceback) for *name*,
        the module whose import raised *exc_info*.
        """
        exc_type, exc_value, exc_traceback = exc_info
        # Exact type checks ('is'), so e.g. IndentationError does not fall
        # into the SyntaxError branch despite being a subclass.
        if exc_type is ImportError:
            msg = f"Tests not run due to {name} file not found"
            err_type = 'import'
        elif exc_type is SyntaxError:
            msg = "Tests not run due to syntax error"
            err_type = 'syntax'
        elif exc_type is EOFError:
            msg = "Tests not run due to unexpectedly waiting for input"
            err_type = 'eof'
        elif exc_type is IndentationError:
            msg = "Tests not run due to indentation error"
            err_type = 'indentation'
        else:
            msg = "Tests not run due to arbitrary exception"
            err_type = 'exception'

        err_msg = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        # Hide importlib bootstrap frames and (optionally) directory paths.
        err_msg = self._remove_importlib.sub('', err_msg)
        if self._args.hide_tb_paths:
            err_msg = self._remove_path.sub(r'File "\1"', err_msg)
        return err_type, msg, err_msg

    def output_results(self, all_tests: List[TestCase], result: TestResult):
        """Emit the results as JSON or human-readable text per --json."""
        runtime = result.run_time
        total = result.testsRun
        fails, skips = len(result.failures) + len(result.errors), len(result.skipped)
        passed = total - fails - skips
        if self._args.json:
            errors = []
            for err_type, msg, err_msg in self._import_errors:
                errors.append(dict(error=err_type, error_message=f'{msg}\n{err_msg}'))
            data = dict(total=total, failed=fails, skipped=skips, passed=passed,
                        time=runtime, results=result.to_dict(), errors=errors)
            json.dump(data, sys.stdout, indent=4)
        else:
            # Join the lists sorted by the test order
            failed_tests = sorted(
                self._add_flavour('FAIL', result.failures) +
                self._add_flavour('ERROR', result.errors) +
                self._add_flavour('SKIP', result.skipped),
                key=lambda t: all_tests.index(t[1]))
            self.print_results(failed_tests, result)
            print(self.separator2)
            print(f'Ran {total} tests in {runtime:.3f} seconds with '
                  f'{passed} passed/{skips} skipped/{fails} failed.')

    def run(self, test_cases: List[Union[TestCase, Type[TestCase]]]) -> Optional[TestResult]:
        """Run *test_cases*; returns the TestResult, or None when the run is
        aborted early because a scripted import failed."""
        if not self._args.ignore_import_fails and self._import_errors:
            # Only the first import error is reported on early abort.
            err_type, msg, err_msg = self._import_errors[0]
            if self._args.json:
                data = dict(error=err_type, error_message=f'{msg}\n{err_msg}')
                json.dump(data, sys.stdout, indent=4)
            else:
                print(BLOCK_TEMPLATE.format(msg))
                print(err_msg)
            return None

        suite = TestLoader().loadTestCases(test_cases)
        # hide unittest output
        with RedirectStdIO(stdout=self._args.suppress_stdout, stderr=True) as stdio:
            runner = unittest.TextTestRunner(stream=None,
                                             verbosity=0,
                                             resultclass=TestResult)
            if self._args.include_no_print:
                if not self._args.suppress_stdout:
                    raise RuntimeError("Can't test for no print without suppressing stdout")
                suite.addTest(TestNoPrint(stdio))
            # Snapshot ordering before the run so failures can be sorted later.
            all_tests = list(suite)
            result = runner.run(suite)

        self.output_results(all_tests, result)
        return result
|
base.py | '''Using a socket server to receive and process incoming data.'''
import socket
import time
from threading import Thread
from queue import Queue
def log(x):
    """Print *x* prefixed with a month-day-time stamp (microsecond precision)."""
    from datetime import datetime
    stamp = datetime.now().strftime("%m-%d-%H:%M:%S.%f ")
    print(stamp + x)
class BaseSocket:
    """Threaded TCP server that reads length-prefixed messages into a queue.

    Wire protocol: each message is an ASCII decimal length terminated by
    '\\n', followed by exactly that many payload bytes.
    """

    def __init__(self, host, port, debug=False) -> None:
        """Bind to (host, port), start listening, and spawn the reader thread."""
        print('[Info] server start')
        serversocket = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM)
        serversocket.bind((host, port))
        serversocket.listen(1)
        self.serversocket = serversocket
        self.queue = Queue()
        # Daemon thread: the accept loop never terminates on its own, so a
        # non-daemon thread would keep the interpreter alive forever.
        self.t = Thread(target=self.run, daemon=True)
        self.t.start()
        self.debug = debug
        self.disconnect = False

    @staticmethod
    def recvLine(sock):
        """Read bytes until '\\n'.

        Returns (ok, text): ok is False when the peer closed the connection
        before a complete line arrived; text is the stripped ASCII line.
        """
        flag = True
        result = b''
        while not result.endswith(b'\n'):
            res = sock.recv(1)
            if not res:
                flag = False
                break
            result += res
        return flag, result.strip().decode('ascii')

    @staticmethod
    def recvAll(sock, l):
        """Read up to *l* bytes (l may be a numeric string), decoded as ASCII.

        Stops early when the peer closes the connection — recv() returning
        b'' would otherwise make the loop spin forever.
        """
        l = int(l)
        result = b''
        while len(result) < l:
            t = sock.recv(l - len(result))
            if not t:
                # Connection closed mid-message: return what we have.
                break
            result += t
        return result.decode('ascii')

    def run(self):
        """Accept clients sequentially, enqueueing every received message."""
        while True:
            clientsocket, addr = self.serversocket.accept()
            print("[Info] Connect: %s" % str(addr))
            self.disconnect = False
            while True:
                flag, l = self.recvLine(clientsocket)
                if not flag:
                    print("[Info] Disconnect: %s" % str(addr))
                    self.disconnect = True
                    break
                data = self.recvAll(clientsocket, l)
                if self.debug:
                    log('[Info] Recv data')
                self.queue.put(data)
            clientsocket.close()

    def update(self):
        """Drain the queue (after a 1 s pause) and hand each message to main()."""
        time.sleep(1)
        while not self.queue.empty():
            log('update')
            data = self.queue.get()
            self.main(data)

    def main(self, datas):
        """Default message handler; override in subclasses."""
        print(datas)

    def __del__(self):
        # Close the listening socket. The daemon reader thread is NOT joined:
        # its accept loop never finishes, so joining it would hang forever.
        self.serversocket.close()
|
sanitylib.py | #!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
import traceback
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
    '''Represents a CMake cache entry.

    This class understands the type system in a CMakeCache.txt, and
    converts the following cache types to Python types:

       Cache Type    Python type
       ----------    -------------------------------------------
       FILEPATH      str
       PATH          str
       STRING        str OR list of str (if ';' is in the value)
       BOOL          bool
       INTERNAL      str OR list of str (if ';' is in the value)
       ----------    -------------------------------------------
    '''

    # Regular expression for a cache entry.
    #
    # CMake variable names can include escape characters, allowing a
    # wider set of names than is easy to match with a regular
    # expression. To be permissive here, use a non-greedy match up to
    # the first colon (':'). This breaks if the variable name has a
    # colon inside, but it's good enough.
    CACHE_ENTRY = re.compile(
        r'''(?P<name>.*?) # name
        :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
        =(?P<value>.*) # value
        ''', re.X)

    @classmethod
    def _to_bool(cls, val):
        # CMake truth rules, per https://cmake.org/cmake/help/v3.0/command/if.html:
        # 1, ON, YES, TRUE, Y and non-zero numbers are true; 0, OFF, NO,
        # FALSE, N, IGNORE, NOTFOUND, the empty string and anything ending
        # in -NOTFOUND are false (all case-insensitive). Anything else that
        # is not a number is an error here.
        upper = val.upper()
        if upper in ('ON', 'YES', 'TRUE', 'Y'):
            return 1
        if upper in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return 0
        if upper.endswith('-NOTFOUND'):
            return 0
        try:
            return int(upper) != 0
        except ValueError as exc:
            raise ValueError('invalid bool {}'.format(upper)) from exc

    @classmethod
    def from_line(cls, line, line_no):
        """Parse one cache-file line into a CMakeCacheEntry.

        Returns None for comments, blank lines and anything that does not
        match the entry grammar; *line_no* is only used in error messages.
        """
        # Comments can only occur at the beginning of a line.
        # (The value of an entry could contain a comment character).
        if line.startswith(('//', '#')):
            return None

        # Whitespace-only lines do not contain cache entries.
        if not line.strip():
            return None

        m = cls.CACHE_ENTRY.match(line)
        if not m:
            return None

        name = m.group('name')
        type_ = m.group('type')
        value = m.group('value')
        if type_ == 'BOOL':
            try:
                value = cls._to_bool(value)
            except ValueError as exc:
                args = exc.args + ('on line {}: {}'.format(line_no, line),)
                raise ValueError(args) from exc
        elif type_ in ('STRING', 'INTERNAL'):
            # A ';' marks a CMake list; expose it as a Python list.
            if ';' in value:
                value = value.split(';')

        return CMakeCacheEntry(name, value)

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        return 'CMakeCacheEntry(name={}, value={})'.format(self.name, self.value)
class CMakeCache:
    '''Parses and represents a CMake cache file.'''

    @staticmethod
    def from_file(cache_file):
        """Alternate constructor, kept for readability at call sites."""
        return CMakeCache(cache_file)

    def __init__(self, cache_file):
        self.cache_file = cache_file
        self.load(cache_file)

    def load(self, cache_file):
        """(Re)populate the entry map from *cache_file*, preserving order."""
        with open(cache_file, 'r') as cache:
            parsed = [CMakeCacheEntry.from_line(line, line_no)
                      for line_no, line in enumerate(cache)]
        self._entries = OrderedDict((entry.name, entry)
                                    for entry in parsed if entry)

    def get(self, name, default=None):
        """Return the value stored under *name*, or *default* if absent."""
        entry = self._entries.get(name)
        return default if entry is None else entry.value

    def get_list(self, name, default=None):
        """Like get(), but coerce the value to a list (default: [])."""
        if default is None:
            default = []
        entry = self._entries.get(name)
        if entry is None:
            return default
        value = entry.value
        if isinstance(value, list):
            return value
        if isinstance(value, str):
            # An empty string means an empty list, not [''].
            return [value] if value else []
        msg = 'invalid value {} type {}'
        raise RuntimeError(msg.format(value, type(value)))

    def __contains__(self, name):
        return name in self._entries

    def __getitem__(self, name):
        return self._entries[name].value

    def __setitem__(self, name, entry):
        if not isinstance(entry, CMakeCacheEntry):
            msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
            raise TypeError(msg.format(type(entry), entry))
        self._entries[name] = entry

    def __delitem__(self, name):
        del self._entries[name]

    def __iter__(self):
        # Iterates over the entry objects, not the names.
        return iter(self._entries.values())
class SanityCheckException(Exception):
    """Base class for all sanitycheck-specific errors."""
    pass
class SanityRuntimeError(SanityCheckException):
    """Raised for runtime failures while driving a sanitycheck run."""
    pass
class ConfigurationError(SanityCheckException):
    """Error in a configuration file; the message is prefixed with the
    offending file's name for context."""

    def __init__(self, cfile, message):
        super().__init__(cfile + ": " + message)
class BuildError(SanityCheckException):
    """Raised when building a test image fails."""
    pass
class ExecutionError(SanityCheckException):
    """Raised when executing a built test image fails."""
    pass
class HarnessImporter:
    """Dynamically loads a harness class by name from
    scripts/sanity_chk/harness.py, falling back to the generic "Test"
    harness when *name* is empty."""

    def __init__(self, name):
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
        module = __import__("harness")
        # An empty/None name selects the default harness class.
        class_name = name if name else "Test"
        harness_class = getattr(module, class_name)
        self.instance = harness_class()
class Handler:
    """Base class holding the shared state and lifecycle of a test handler."""

    def __init__(self, instance, type_str="build"):
        """Capture metadata from the test instance and enter 'running' state.

        @param instance Test Instance
        @param type_str handler flavour (e.g. "build", "device", "qemu")
        """
        self.lock = threading.Lock()

        self.state = "waiting"
        self.run = False
        self.duration = 0
        self.type_str = type_str

        self.binary = None
        self.pid_fn = None
        self.call_make_run = False

        self.name = instance.name
        self.instance = instance
        self.timeout = instance.testcase.timeout
        self.sourcedir = instance.testcase.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.set_state("running", self.duration)

        self.generator = None
        self.generator_cmd = None

        self.args = []

    def set_state(self, state, duration):
        """Atomically update the (state, duration) pair."""
        with self.lock:
            self.state = state
            self.duration = duration

    def get_state(self):
        """Atomically read the (state, duration) pair."""
        with self.lock:
            return (self.state, self.duration)

    def record(self, harness):
        """Append the harness recording, if any, to recording.csv."""
        if not harness.recording:
            return
        filename = os.path.join(self.build_dir, "recording.csv")
        with open(filename, "at") as csvfile:
            cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
            cw.writerow(harness.fieldnames)
            cw.writerows(harness.recording)
class BinaryHandler(Handler):
    """Runs a natively-built test binary directly on the host."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        # set to True once terminate() has been used on the child process
        self.terminated = False

        # Tool options
        self.valgrind = False
        self.lsan = False
        self.asan = False
        self.ubsan = False
        self.coverage = False

    def try_kill_process_by_pid(self):
        """SIGTERM the process whose pid is stored in self.pid_fn, if any."""
        if self.pid_fn:
            pid = int(open(self.pid_fn).read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # process already gone — nothing to do
                pass

    def terminate(self, proc):
        """Terminate *proc* (and any pid-file process), escalating to kill."""
        # encapsulate terminate functionality so we do it consistently where ever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja's don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
        self.try_kill_process_by_pid()
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True

    def _output_reader(self, proc, harness):
        """Stream the child's stdout to handler.log and feed each line to
        *harness*, terminating the child once the harness reaches a state."""
        log_out_fp = open(self.log, "wt")
        for line in iter(proc.stdout.readline, b''):
            logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
            log_out_fp.write(line.decode('utf-8'))
            log_out_fp.flush()
            harness.handle(line.decode('utf-8').rstrip())
            if harness.state:
                try:
                    # POSIX arch based ztests end on their own,
                    # so let's give it up to 100ms to do so
                    proc.wait(0.1)
                except subprocess.TimeoutExpired:
                    self.terminate(proc)
                break
        log_out_fp.close()

    def handle(self):
        """Run the built binary (optionally under valgrind), monitor its
        output with the configured harness, and set the final state."""
        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)

        if self.call_make_run:
            command = [self.generator_cmd, "run"]
        else:
            command = [self.binary]

        run_valgrind = False
        if self.valgrind and shutil.which("valgrind"):
            command = ["valgrind", "--error-exitcode=2",
                       "--leak-check=full",
                       "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
                       "--log-file=" + self.build_dir + "/valgrind.log"
                       ] + command
            run_valgrind = True

        logger.debug("Spawning process: " +
                     " ".join(shlex.quote(word) for word in command) + os.linesep +
                     "in directory: " + self.build_dir)

        start_time = time.time()

        env = os.environ.copy()
        if self.asan:
            env["ASAN_OPTIONS"] = "log_path=stdout:" + \
                                  env.get("ASAN_OPTIONS", "")
            if not self.lsan:
                # leak detection only when LSan was explicitly requested
                env["ASAN_OPTIONS"] += "detect_leaks=0"

        if self.ubsan:
            env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
                                   env.get("UBSAN_OPTIONS", "")

        with subprocess.Popen(command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
            logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
            t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
            t.start()
            t.join(self.timeout)
            if t.is_alive():
                # reader did not finish in time: kill the child and reap it
                self.terminate(proc)
                t.join()
            proc.wait()
            self.returncode = proc.returncode

        handler_time = time.time() - start_time

        if self.coverage:
            subprocess.call(["GCOV_PREFIX=" + self.build_dir,
                             "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)

        self.try_kill_process_by_pid()
        # FIXME: This is needed when killing the simulator, the console is
        # garbled and needs to be reset. Did not find a better way to do that.
        subprocess.call(["stty", "sane"])
        self.instance.results = harness.tests

        # Classify the outcome; order matters (explicit crash first, then
        # valgrind errors, then whatever the harness concluded).
        if not self.terminated and self.returncode != 0:
            # When a process is killed, the default handler returns 128 + SIGTERM
            # so in that case the return code itself is not meaningful
            self.set_state("failed", handler_time)
            self.instance.reason = "Failed"
        elif run_valgrind and self.returncode == 2:
            self.set_state("failed", handler_time)
            self.instance.reason = "Valgrind error"
        elif harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state("timeout", handler_time)
            self.instance.reason = "Timeout"

        self.record(harness)
class DeviceHandler(Handler):
    """Flashes and runs a test image on real connected hardware."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        # set by the owning test suite before handle() is called
        self.suite = None

    def monitor_serial(self, ser, halt_fileno, harness):
        """Read device serial output into handler.log and feed each line to
        *harness*, until the harness reaches a state, a halt byte arrives on
        *halt_fileno*, or the serial port errors out."""
        log_out_fp = open(self.log, "wt")

        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]

        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)

            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.

            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            except serial.SerialException:
                ser.close()
                break

            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))

                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())

            if harness.state:
                ser.close()
                break

        log_out_fp.close()

    def device_is_available(self, instance):
        """Return True when a connected board matching *instance*'s platform
        (and its required fixture, if any) is currently free."""
        device = instance.platform.name
        fixture = instance.testcase.harness_config.get("fixture")
        for i in self.suite.connected_hardware:
            if fixture and fixture not in i.get('fixtures', []):
                continue
            if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
                return True

        return False

    def get_available_device(self, instance):
        """Claim and return a free board record for *instance*, or None."""
        device = instance.platform.name
        for i in self.suite.connected_hardware:
            if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
                # mark busy and count how often this board has been used
                i['available'] = False
                i['counter'] += 1
                return i

        return None

    def make_device_available(self, serial):
        """Release the board associated with *serial* back to the pool."""
        with hw_map_local:
            for i in self.suite.connected_hardware:
                if i['serial'] == serial or i.get('serial_pty', None):
                    i['available'] = True

    @staticmethod
    def run_custom_script(script, timeout):
        """Run a user-supplied pre/post script, killing it after *timeout* s."""
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, _ = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())

            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        """Flash the test image onto hardware and monitor its serial output
        until the harness finishes or the timeout expires."""
        out_state = "failed"

        # Busy-wait (1 s poll) until a matching board is free.
        while not self.device_is_available(self.instance):
            logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
            time.sleep(1)

        hardware = self.get_available_device(self.instance)

        if hardware:
            runner = hardware.get('runner', None) or self.suite.west_runner

        serial_pty = hardware.get('serial_pty', None)
        if serial_pty:
            master, slave = pty.openpty()
            try:
                ser_pty_process = subprocess.Popen(serial_pty, stdout=master, stdin=master, stderr=master)
            except subprocess.CalledProcessError as error:
                logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
                return

            serial_device = os.ttyname(slave)
        else:
            serial_device = hardware['serial']

        logger.debug("Using serial device {}".format(serial_device))

        if (self.suite.west_flash is not None) or runner:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command_extra_args = []

            # There are three ways this option is used.
            # 1) bare: --west-flash
            #    This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            #    This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42 --erase"
            if self.suite.west_flash and self.suite.west_flash != []:
                command_extra_args.extend(self.suite.west_flash.split(','))

            if runner:
                command.append("--runner")
                command.append(runner)

                board_id = hardware.get("probe_id", hardware.get("id", None))
                product = hardware.get("product", None)
                # Each runner has its own way of selecting a board by serial.
                if board_id is not None:
                    if runner == "pyocd":
                        command_extra_args.append("--board-id")
                        command_extra_args.append(board_id)
                    elif runner == "nrfjprog":
                        command_extra_args.append("--snr")
                        command_extra_args.append(board_id)
                    elif runner == "openocd" and product == "STM32 STLink":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "STLINK-V3":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("cmsis_dap_serial %s" % (board_id))
                    elif runner == "jlink":
                        command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))

            if command_extra_args != []:
                command.append('--')
                command.extend(command_extra_args)
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]

        pre_script = hardware.get('pre_script')
        post_flash_script = hardware.get('post_flash_script')
        post_script = hardware.get('post_script')

        if pre_script:
            self.run_custom_script(pre_script, 30)

        try:
            ser = serial.Serial(
                serial_device,
                baudrate=115200,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=self.timeout
            )
        except serial.SerialException as e:
            self.set_state("failed", 0)
            self.instance.reason = "Failed"
            logger.error("Serial device error: %s" % (str(e)))

            if serial_pty:
                ser_pty_process.terminate()
                outs, errs = ser_pty_process.communicate()
                logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

            self.make_device_available(serial_device)
            return

        ser.flush()

        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        # write_pipe is used to signal the monitor thread to halt
        read_pipe, write_pipe = os.pipe()
        start_time = time.time()

        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, read_pipe, harness))
        t.start()

        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=30)
                    logger.debug(stdout.decode())

                    if proc.returncode != 0:
                        self.instance.reason = "Device issue (Flash?)"
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                except subprocess.TimeoutExpired:
                    proc.kill()
                    (stdout, stderr) = proc.communicate()
                    self.instance.reason = "Device issue (Timeout)"
                    with open(d_log, "w") as dlog_fp:
                        dlog_fp.write(stderr.decode())
        except subprocess.CalledProcessError:
            os.write(write_pipe, b'x')  # halt the thread

        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)

        t.join(self.timeout)
        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
            out_state = "timeout"

        if ser.isOpen():
            ser.close()

        if serial_pty:
            ser_pty_process.terminate()
            outs, errs = ser_pty_process.communicate()
            logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

        os.close(write_pipe)
        os.close(read_pipe)

        handler_time = time.time() - start_time

        if out_state == "timeout":
            # Mark any testcase that produced no result as blocked.
            for c in self.instance.testcase.cases:
                if c not in harness.tests:
                    harness.tests[c] = "BLOCK"

            self.instance.reason = "Timeout"

        self.instance.results = harness.tests

        if harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state(out_state, handler_time)

        if post_script:
            self.run_custom_script(post_script, 30)

        self.make_device_available(serial_device)

        self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
    """Constructor

    @param instance Test instance
    """
    super().__init__(instance, type_str)
    self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
    self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")

    # Crash tolerance and EOF tolerance are both driven by the same tag.
    tolerate_crash = "ignore_qemu_crash" in instance.testcase.tags
    self.ignore_qemu_crash = tolerate_crash
    self.ignore_unexpected_eof = tolerate_crash
@staticmethod
def _get_cpu_time(pid):
    """Return user+system CPU seconds consumed by process *pid*.

    The guest virtual time in QEMU icount mode isn't host time and
    it's maintained by counting guest instructions, so we use QEMU
    process execution time to mostly simulate the time of guest OS.
    """
    times = psutil.Process(pid).cpu_times()
    return times.user + times.system
    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
                ignore_unexpected_eof=False):
        """Worker thread: read QEMU console output through a FIFO pair and
        drive the harness until a verdict or timeout.

        @param handler QEMUHandler owning this thread; receives the final state
        @param timeout Overall timeout in seconds
        @param outdir Output directory (unused in this body — TODO confirm)
        @param logfile Path where raw QEMU console output is appended
        @param fifo_fn Base path; ".in"/".out" FIFOs are created from it
        @param pid_fn File QEMU writes its PID into
        @param results Results dict (not written here directly — harness holds them)
        @param harness Harness object fed one console line at a time
        @param ignore_unexpected_eof When True, an EOF on the FIFO is not
            recorded as a failure state
        """
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"
        # These in/out nodes are named from QEMU's perspective, not ours
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)
        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "wt")
        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)
        # out_state holds the verdict string; None until something decides it.
        out_state = None
        line = ""
        timeout_extended = False
        # QEMU writes its PID after startup; 0 means "not known yet".
        pid = 0
        if os.path.exists(pid_fn):
            pid = int(open(pid_fn).read())
        while True:
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                try:
                    if pid and this_timeout > 0:
                        #there's possibility we polled nothing because
                        #of not enough CPU time scheduled by host for
                        #QEMU process during p.poll(this_timeout)
                        cpu_time = QEMUHandler._get_cpu_time(pid)
                        if cpu_time < timeout and not out_state:
                            # Extend the deadline by the CPU time QEMU has
                            # not yet been granted by the host scheduler.
                            timeout_time = time.time() + (timeout - cpu_time)
                            continue
                except ProcessLookupError:
                    # QEMU process vanished while we were checking on it.
                    out_state = "failed"
                    break
                if not out_state:
                    out_state = "timeout"
                break
            # Pick up the PID late if QEMU had not written it at startup.
            if pid == 0 and os.path.exists(pid_fn):
                pid = int(open(pid_fn).read())
            try:
                c = in_fp.read(1).decode("utf-8")
            except UnicodeDecodeError:
                # Test is writing something weird, fail
                out_state = "unexpected byte"
                break
            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                if not ignore_unexpected_eof:
                    out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue
            # line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            logger.debug("QEMU: %s" % line)
            harness.handle(line)
            if harness.state:
                # if we have registered a fail make sure the state is not
                # overridden by a false success message coming from the
                # testsuite
                if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
                    out_state = harness.state
                # if we get some state, that means test is doing well, we reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code
                # coverage is enabled since dumping this information can
                # take some time.
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""
        handler.record(harness)
        handler_time = time.time() - start_time
        logger.debug("QEMU complete (%s) after %f seconds" %
                     (out_state, handler_time))
        # Translate the internal out_state into the handler's pass/fail state.
        if out_state == "timeout":
            handler.instance.reason = "Timeout"
            handler.set_state("failed", handler_time)
        elif out_state == "failed":
            handler.instance.reason = "Failed"
            handler.set_state("failed", handler_time)
        elif out_state in ['unexpected eof', 'unexpected byte']:
            handler.instance.reason = out_state
            handler.set_state("failed", handler_time)
        else:
            handler.set_state(out_state, handler_time)
        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if pid:
            try:
                if pid:
                    os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
        os.unlink(fifo_in)
        os.unlink(fifo_out)
    def handle(self):
        """Start the QEMU console-reader thread, then launch QEMU through the
        build generator ("<generator> -C <build_dir> run") and wait for it.

        The final pass/fail state is decided from both the QEMU process
        return code and the harness state collected by the reader thread.
        """
        self.results = {}
        self.run = True
        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.
        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
        self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
        # Remove a stale PID file so _thread does not pick up an old PID.
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)
        self.log_fn = self.log
        harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)
        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness,
                                             self.ignore_unexpected_eof))
        self.instance.results = harness.tests
        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        # Restore sane terminal settings in case QEMU mangled them earlier.
        subprocess.call(["stty", "sane"])
        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", self.build_dir, "run"]
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
            try:
                proc.wait(self.timeout)
            except subprocess.TimeoutExpired:
                #sometimes QEMU can't handle SIGTERM signal correctly
                #in that case kill -9 QEMU process directly and leave
                #sanitycheck judge testing result by console output
                if os.path.exists(self.pid_fn):
                    qemu_pid = int(open(self.pid_fn).read())
                    try:
                        os.kill(qemu_pid, signal.SIGKILL)
                    except ProcessLookupError:
                        pass
                    proc.wait()
                    # A SIGKILLed QEMU exits non-zero; trust the harness
                    # verdict instead of the return code in that case.
                    if harness.state == "passed":
                        self.returncode = 0
                    else:
                        self.returncode = proc.returncode
                else:
                    proc.terminate()
                    proc.kill()
                    self.returncode = proc.returncode
            else:
                logger.debug(f"No timeout, return code from qemu: {proc.returncode}")
                self.returncode = proc.returncode
            # Need to wait for harness to finish processing
            # output from QEMU. Otherwise it might miss some
            # error messages.
            self.thread.join()
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)
        logger.debug(f"return code from qemu: {self.returncode}")
        # A crash (non-zero exit) fails the test unless explicitly ignored;
        # no harness state at all is always a failure.
        if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
            self.set_state("failed", 0)
            self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
    """Compute RAM/ROM usage of a Zephyr ELF binary from its section table.

    Section names are classified into three groups:
      * alloc_sections: zero-filled at runtime, occupy RAM only.
      * rw_sections: initialized data, counted against both RAM and ROM.
      * ro_sections: read-only, counted against ROM; also against RAM
        when the image is not XIP (it gets copied into RAM).
    """

    alloc_sections = [
        "bss",
        "noinit",
        "app_bss",
        "app_noinit",
        "ccm_bss",
        "ccm_noinit"
    ]

    rw_sections = [
        "datas",
        "initlevel",
        "exceptions",
        "initshell",
        "_static_thread_data_area",
        "k_timer_area",
        "k_mem_slab_area",
        "k_mem_pool_area",
        "sw_isr_table",
        "k_sem_area",
        "k_mutex_area",
        "app_shmem_regions",
        "_k_fifo_area",
        "_k_lifo_area",
        "k_stack_area",
        "k_msgq_area",
        "k_mbox_area",
        "k_pipe_area",
        "net_if_area",
        "net_if_dev_area",
        "net_l2_area",
        "net_l2_data",
        "k_queue_area",
        "_net_buf_pool_area",
        "app_datas",
        "kobject_data",
        "mmu_tables",
        "app_pad",
        "priv_stacks",
        "ccm_data",
        "usb_descriptor",
        "usb_data", "usb_bos_desc",
        "uart_mux",
        'log_backends_sections',
        'log_dynamic_sections',
        # NOTE: 'log_const_sections' used to appear twice in this list;
        # the duplicate was removed (membership tests are unaffected).
        'log_const_sections',
        "app_smem",
        'shell_root_cmds_sections',
        "font_entry_sections",
        "priv_stacks_noinit",
        "_GCOV_BSS_SECTION_NAME",
        "gcov",
        "nocache",
        "devices",
        "k_heap_area",
    ]

    # These get copied into RAM only on non-XIP
    ro_sections = [
        "rom_start",
        "text",
        "ctors",
        "init_array",
        "reset",
        "z_object_assignment_area",
        "rodata",
        "net_l2",
        "vector",
        "sw_isr_table",
        "settings_handler_static_area",
        "bt_l2cap_fixed_chan_area",
        "bt_l2cap_br_fixed_chan_area",
        "bt_gatt_service_static_area",
        "vectors",
        "net_socket_register_area",
        "net_ppp_proto",
        "shell_area",
        "tracing_backend_area",
        "ppp_protocol_handler_area",
    ]

    def __init__(self, filename, extra_sections):
        """Constructor

        @param filename Path to the output binary
            The <filename> is parsed by objdump to determine section sizes
        @param extra_sections Iterable of additional section names that
            should be treated as recognized (but not size-classified)
        """
        # Make sure this is an ELF binary
        with open(filename, "rb") as f:
            magic = f.read(4)

        if magic != b'\x7fELF':
            # Exit immediately instead of raising an exception that was
            # caught on the very next line (the old raise/except pattern).
            print("%s is not an ELF binary" % filename)
            sys.exit(2)

        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not
        # found.
        is_xip_command = "nm " + filename + \
                         " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(
            is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
            "utf-8").strip()
        if is_xip_output.endswith("no symbols"):
            print("%s has no symbol information" % filename)
            sys.exit(2)
        self.is_xip = (len(is_xip_output) != 0)

        self.filename = filename
        self.sections = []
        self.rom_size = 0
        self.ram_size = 0
        self.extra_sections = extra_sections

        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        return [v["name"] for v in self.sections if not v["recognized"]]

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(
            objdump_command, shell=True).decode("utf-8").splitlines()

        for line in objdump_output:
            words = line.split()

            if not words:  # Skip lines that are too short
                continue

            index = words[0]
            if not index[0].isdigit():  # Skip lines that do not start
                continue                # with a digit

            name = words[1]  # Skip lines with section names
            if name[0] == '.':  # starting with '.'
                continue

            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue

            load_addr = int(words[4], 16)
            virt_addr = int(words[3], 16)

            # Add section to memory use totals (for both non-XIP and XIP scenarios)
            # Unrecognized section names are not included in the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                if name not in self.extra_sections:
                    recognized = False

            self.sections.append({"name": name, "load_addr": load_addr,
                                  "size": size, "virt_addr": virt_addr,
                                  "type": stype, "recognized": recognized})
class SanityConfigParser:
    """Class to read test case files with semantic checking
    """

    def __init__(self, filename, schema):
        """Instantiate a new SanityConfigParser object

        @param filename Source .yaml file to read
        @param schema Schema (scl-loaded) the YAML file must validate against
        """
        self.data = {}
        self.schema = schema
        self.filename = filename
        self.tests = {}
        self.common = {}

    def load(self):
        """Load and schema-validate the YAML file, populating
        self.tests and self.common from its top-level sections."""
        self.data = scl.yaml_load_verify(self.filename, self.schema)
        if 'tests' in self.data:
            self.tests = self.data['tests']
        if 'common' in self.data:
            self.common = self.data['common']

    def _cast_value(self, value, typestr):
        """Convert a raw YAML value according to the type name *typestr*.

        Supports "str", "float", "int", "bool", "map", and "list"/"set"
        (optionally with an element type, e.g. "list:int").

        @raises ConfigurationError for an unknown type name
        """
        if isinstance(value, str):
            v = value.strip()
        if typestr == "str":
            return v
        elif typestr == "float":
            return float(value)
        elif typestr == "int":
            return int(value)
        elif typestr == "bool":
            # YAML parses booleans natively, so the value passes through.
            return value
        elif typestr.startswith("list") and isinstance(value, list):
            return value
        elif typestr.startswith("list") and isinstance(value, str):
            vs = v.split()
            if len(typestr) > 4 and typestr[4] == ":":
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs
        elif typestr.startswith("set"):
            vs = v.split()
            if len(typestr) > 3 and typestr[3] == ":":
                return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
            else:
                return set(vs)
        elif typestr.startswith("map"):
            return value
        else:
            # Bug fix: the message previously interpolated the *value*
            # while claiming to report the unknown *type*.
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)

    def get_test(self, name, valid_keys):
        """Get a dictionary representing the keys/values within a test

        @param name The test in the .yaml file to retrieve data from
        @param valid_keys A dictionary representing the intended semantics
            for this test. Each key in this dictionary is a key that could
            be specified, if a key is given in the .yaml file which isn't in
            here, it will generate an error. Each value in this dictionary
            is another dictionary containing metadata:

                "default" - Default value if not given
                "type" - Data type to convert the text value to. Simple types
                    supported are "str", "float", "int", "bool" which will get
                    converted to respective Python data types. "set" and "list"
                    may also be specified which will split the value by
                    whitespace (but keep the elements as strings). finally,
                    "list:<type>" and "set:<type>" may be given which will
                    perform a type conversion after splitting the value up.
                "required" - If true, raise an error if not defined. If false
                    and "default" isn't specified, a type conversion will be
                    done on an empty string

        @return A dictionary containing the test key-value pairs with
            type conversion and default values filled in per valid_keys
        """
        d = {}
        # Start from the "common" section; per-test values are merged on top.
        for k, v in self.common.items():
            d[k] = v

        for k, v in self.tests[name].items():
            if k in d:
                if isinstance(d[k], str):
                    # By default, we just concatenate string values of keys
                    # which appear both in "common" and per-test sections,
                    # but some keys are handled in adhoc way based on their
                    # semantics.
                    if k == "filter":
                        d[k] = "(%s) and (%s)" % (d[k], v)
                    else:
                        d[k] += " " + v
            else:
                d[k] = v

        for k, kinfo in valid_keys.items():
            if k not in d:
                if "required" in kinfo:
                    required = kinfo["required"]
                else:
                    required = False

                if required:
                    raise ConfigurationError(
                        self.filename,
                        "missing required value for '%s' in test '%s'" %
                        (k, name))
                else:
                    if "default" in kinfo:
                        default = kinfo["default"]
                    else:
                        default = self._cast_value("", kinfo["type"])
                    d[k] = default
            else:
                try:
                    d[k] = self._cast_value(d[k], kinfo["type"])
                except ValueError:
                    raise ConfigurationError(
                        self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
                        (kinfo["type"], d[k], k, name))

        return d
class Platform:
    """Class representing metadata for a particular platform

    Maps directly to BOARD when building"""

    platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
                                                 "scripts", "sanity_chk", "platform-schema.yaml"))

    def __init__(self):
        """Initialize every metadata field to its documented default."""
        self.name = ""
        self.sanitycheck = True
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = 128
        self.ignore_tags = []
        self.only_tags = []
        self.default = False
        # if no flash size is specified by the board, take a default of 512K
        self.flash = 512
        self.supported = set()
        self.arch = ""
        self.type = "na"
        self.simulation = "na"
        self.supported_toolchains = []
        self.env = []
        self.env_satisfied = True
        self.filter_data = dict()

    def load(self, platform_file):
        """Populate this object from a platform YAML description file."""
        parser = SanityConfigParser(platform_file, self.platform_schema)
        parser.load()
        raw = parser.data

        self.name = raw['identifier']
        self.sanitycheck = raw.get("sanitycheck", True)
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = raw.get("ram", 128)

        testing = raw.get("testing", {})
        self.ignore_tags = testing.get("ignore_tags", [])
        self.only_tags = testing.get("only_tags", [])
        self.default = testing.get("default", False)
        # if no flash size is specified by the board, take a default of 512K
        self.flash = raw.get("flash", 512)

        # "supported" entries may be colon-separated feature chains;
        # flatten them into a single feature set.
        self.supported = {
            feature
            for entry in raw.get("supported", [])
            for feature in entry.split(":")
        }

        self.arch = raw['arch']
        self.type = raw.get('type', "na")
        self.simulation = raw.get('simulation', "na")
        self.supported_toolchains = raw.get("toolchain", [])

        self.env = raw.get("env", [])
        # Satisfied only when every required environment variable is set
        # to a non-empty value.
        self.env_satisfied = all(os.environ.get(var) for var in self.env)

    def __repr__(self):
        return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
    # Prevent pytest from collecting subclasses (e.g. TestCase,
    # TestInstance) as test suites just because their names start with
    # "Test" — this file is a tool, not a pytest test module.
    __test__ = False
class TestCase(DisablePyTestCollectionMixin):
    """Class representing a test application
    """

    def __init__(self, testcase_root, workdir, name):
        """TestCase constructor.

        This gets called by TestSuite as it finds and reads test yaml files.
        Multiple TestCase instances may be generated from a single testcase.yaml,
        each one corresponds to an entry within that file.

        We need to have a unique name for every single test case. Since
        a testcase.yaml can define multiple tests, the canonical name for
        the test case is <workdir>/<name>.

        @param testcase_root os.path.abspath() of one of the --testcase-root
        @param workdir Sub-directory of testcase_root where the
            .yaml test configuration file was found
        @param name Name of this test case, corresponding to the entry name
            in the test case configuration file. For many test cases that just
            define one test, can be anything and is usually "test". This is
            really only used to distinguish between different cases when
            the testcase.yaml defines multiple tests
        """
        self.source_dir = ""
        self.yamlfile = ""
        # Subcase names discovered by parse_subcases()
        self.cases = []
        self.name = self.get_unique(testcase_root, workdir, name)
        self.id = name

        # The remaining attributes mirror keys of the testcase.yaml entry
        # and are presumably filled in by the loader after construction —
        # TODO confirm against the code that instantiates TestCase.
        self.type = None
        self.tags = set()
        self.extra_args = None
        self.extra_configs = None
        self.arch_allow = None
        self.arch_exclude = None
        self.skip = False
        self.platform_exclude = None
        self.platform_allow = None
        self.toolchain_exclude = None
        self.toolchain_allow = None
        self.tc_filter = None
        self.timeout = 60
        self.harness = ""
        self.harness_config = {}
        self.build_only = True
        self.build_on_all = False
        self.slow = False
        self.min_ram = -1
        self.depends_on = None
        self.min_flash = -1
        self.extra_sections = None
        self.integration_platforms = []

    @staticmethod
    def get_unique(testcase_root, workdir, name):
        """Build the canonical unique name for a test case and validate
        that *name* follows the "<category>.<subsystem>" convention.

        @raises SanityCheckException when the name contains no dot
        """
        canonical_testcase_root = os.path.realpath(testcase_root)
        if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
            # This is in ZEPHYR_BASE, so include path in name for uniqueness
            # FIXME: We should not depend on path of test for unique names.
            relative_tc_root = os.path.relpath(canonical_testcase_root,
                                               start=canonical_zephyr_base)
        else:
            relative_tc_root = ""

        # workdir can be "."
        unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
        check = name.split(".")
        if len(check) < 2:
            raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
                    """
                    )
        return unique

    @staticmethod
    def scan_file(inf_name):
        """Scan one C source file for ztest subcase names.

        @param inf_name Path of the source file to scan
        @return (matches, warnings): list of subcase names with the
            leading "test_" stripped (or None when no ztest_test_suite
            is present), plus a warning string or None
        @raises ValueError when a suite is declared but never run
        """
        suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting the end of this match
            br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        stc_regex = re.compile(
            br"^\s*"  # empty space at the beginning is ok
            # catch the case where it is declared in the same sentence, e.g:
            #
            # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
            br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
            # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
            br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
            br"\(\s*"
            br"(?P<stc_name>[a-zA-Z0-9_]+)"
            # _setup_teardown() variant has two extra arguments that we ignore
            br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
            br"\s*\)",
            # We don't check how it finishes; we don't care
            re.MULTILINE)
        suite_run_regex = re.compile(
            br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
            re.MULTILINE)
        achtung_regex = re.compile(
            br"(#ifdef|#endif)",
            re.MULTILINE)
        warnings = None

        with open(inf_name) as inf:
            # mmap flags differ between Windows and POSIX platforms.
            if os.name == 'nt':
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
            else:
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
                             'offset': 0}

            with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
                suite_regex_match = suite_regex.search(main_c)
                if not suite_regex_match:
                    # can't find ztest_test_suite, maybe a client, because
                    # it includes ztest.h
                    return None, None

                suite_run_match = suite_run_regex.search(main_c)
                if not suite_run_match:
                    raise ValueError("can't find ztest_run_test_suite")

                # Preprocessor conditionals inside the suite declaration
                # would make the subcase list unreliable; warn about them.
                achtung_matches = re.findall(
                    achtung_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                if achtung_matches:
                    warnings = "found invalid %s in ztest_test_suite()" \
                               % ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
                _matches = re.findall(
                    stc_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                for match in _matches:
                    if not match.decode().startswith("test_"):
                        warnings = "Found a test that does not start with test_"
                matches = [match.decode().replace("test_", "", 1) for match in _matches]
                return matches, warnings

    def scan_path(self, path):
        """Scan all C sources under *path* for subcases.

        Warnings from files under src/ are fatal (SanityRuntimeError);
        warnings from sources in the test root are only logged.
        """
        subcases = []
        for filename in glob.glob(os.path.join(path, "src", "*.c*")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                    raise SanityRuntimeError("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))

        for filename in glob.glob(os.path.join(path, "*.c")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        return subcases

    def parse_subcases(self, test_path):
        """Populate self.cases with "<id>.<subcase>" names; when no
        subcases are found the bare test id is used."""
        results = self.scan_path(test_path)
        for sub in results:
            name = "{}.{}".format(self.id, sub)
            self.cases.append(name)

        if not results:
            self.cases.append(self.id)

    def __str__(self):
        return self.name
class TestInstance(DisablePyTestCollectionMixin):
    """Class representing the execution of a particular TestCase on a platform

    @param test The TestCase object we want to build/execute
    @param platform Platform object that we want to build and run against
    @param base_outdir Base directory for all test results. The actual
        out directory used is <outdir>/<platform>/<test case name>
    """

    def __init__(self, testcase, platform, outdir):
        self.testcase = testcase
        self.platform = platform

        self.status = None
        self.reason = "Unknown"
        self.metrics = dict()
        self.handler = None
        self.outdir = outdir

        self.name = os.path.join(platform.name, testcase.name)
        self.build_dir = os.path.join(outdir, platform.name, testcase.name)

        self.build_only = True
        self.run = False

        self.results = {}

    def __lt__(self, other):
        return self.name < other.name

    # Global testsuite parameters
    def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=None):
        """Decide whether this instance is build-only or can also run,
        setting self.build_only and self.run accordingly.

        @param build_only Build-only mode was requested on the command line
        @param enable_slow Allow tests marked "slow" to run
        @param device_testing Real hardware is available for running
        @param fixtures Fixtures supplied on the command line; a harness
            that requires a fixture runs only when it is listed here.
            Defaults to no fixtures (None avoids a mutable default).
        """
        if fixtures is None:
            fixtures = []

        # right now we only support building on windows. running is still work
        # in progress.
        if os.name == 'nt':
            self.build_only = True
            self.run = False
            return

        # we asked for build-only on the command line
        if build_only or self.testcase.build_only:
            self.build_only = True
            self.run = False
            return

        # Do not run slow tests:
        skip_slow = self.testcase.slow and not enable_slow
        if skip_slow:
            self.build_only = True
            self.run = False
            return

        runnable = bool(self.testcase.type == "unit" or
                        self.platform.type == "native" or
                        self.platform.simulation in ["nsim", "renode", "qemu"] or
                        device_testing)

        # A simulator platform is only runnable when its binary is installed.
        if self.platform.simulation == "nsim":
            if not find_executable("nsimdrv"):
                runnable = False

        if self.platform.simulation == "renode":
            if not find_executable("renode"):
                runnable = False

        # console harness allows us to run the test and capture data.
        if self.testcase.harness in ['console', 'ztest']:
            # if we have a fixture that is also being supplied on the
            # command-line, then we need to run the test, not just build it.
            fixture = self.testcase.harness_config.get('fixture')
            if fixture:
                _build_only = fixture not in fixtures
            else:
                _build_only = False
        elif self.testcase.harness:
            # Any other harness cannot be driven from here: build only.
            _build_only = True
        else:
            _build_only = False

        # Equivalent to the old "not (not _build_only and runnable)" but
        # stated positively: we run iff not build-only AND runnable.
        self.run = (not _build_only) and runnable
        self.build_only = not self.run

    def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=None):
        """Write the extra Kconfig fragment for this instance and return
        its content.

        @param platform Platform being built (used for coverage/sanitizer
            eligibility checks)
        @param enable_asan Add CONFIG_ASAN=y on native platforms
        @param enable_ubsan Add CONFIG_UBSAN=y on native platforms
        @param enable_coverage Add coverage options when the platform is
            listed in coverage_platform
        @param coverage_platform Platform names eligible for coverage;
            defaults to none (None avoids a mutable default).
        """
        if coverage_platform is None:
            coverage_platform = []

        # Create this in a "sanitycheck/" subdirectory otherwise this
        # will pass this overlay to kconfig.py *twice* and kconfig.cmake
        # will silently give that second time precedence over any
        # --extra-args=CONFIG_*
        subdir = os.path.join(self.build_dir, "sanitycheck")
        os.makedirs(subdir, exist_ok=True)
        file = os.path.join(subdir, "testcase_extra.conf")

        with open(file, "w") as f:
            content = ""

            if self.testcase.extra_configs:
                content = "\n".join(self.testcase.extra_configs)

            if enable_coverage:
                if platform.name in coverage_platform:
                    content = content + "\nCONFIG_COVERAGE=y"
                    content = content + "\nCONFIG_COVERAGE_DUMP=y"

            if enable_asan:
                if platform.type == "native":
                    content = content + "\nCONFIG_ASAN=y"

            if enable_ubsan:
                if platform.type == "native":
                    content = content + "\nCONFIG_UBSAN=y"

            f.write(content)
            return content

    def calculate_sizes(self):
        """Get the RAM/ROM sizes of a test case.

        This can only be run after the instance has been executed by
        MakeGenerator, otherwise there won't be any binaries to measure.

        @return A SizeCalculator object
        """
        fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
        fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
        fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
        if len(fns) != 1:
            raise BuildError("Missing/multiple output ELF binary")

        return SizeCalculator(fns[0], self.testcase.extra_sections)

    def fill_results_by_status(self):
        """Fills results according to self.status

        The method is used to propagate the instance level status
        to the test cases inside. Useful when the whole instance is skipped
        and the info is required also at the test cases level for reporting.
        Should be used with caution, e.g. should not be used
        to fill all results with passes

        NOTE: only 'skipped', 'error' and 'failure' statuses are mapped;
        any other self.status raises KeyError.
        """
        status_to_verdict = {
            'skipped': 'SKIP',
            'error': 'BLOCK',
            'failure': 'FAILED'
        }
        for k in self.results:
            self.results[k] = status_to_verdict[self.status]

    def __repr__(self):
        return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
    """Wrap the cmake configure and build steps for one test instance.

    NOTE(review): run_cmake()/run_build() read self.warnings_as_errors,
    self.cmake_only and self.instance, which are set by subclasses
    (e.g. ProjectBuilder) — confirm before using CMake standalone.
    """

    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    def __init__(self, testcase, platform, source_dir, build_dir):
        """
        @param testcase TestCase being built
        @param platform Platform to build for
        @param source_dir Directory containing the test application
        @param build_dir Output/build directory
        """
        self.cwd = None
        self.capture_output = True

        self.defconfig = {}
        self.cmake_cache = {}

        self.instance = None
        self.testcase = testcase
        self.platform = platform
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.log = "build.log"
        self.generator = None
        self.generator_cmd = None

    def parse_generated(self):
        """Hook for subclasses to parse generated build artifacts;
        the base implementation just resets defconfig."""
        self.defconfig = {}
        return {}

    def run_build(self, args=None):
        """Run the build step and classify the outcome.

        @param args Extra cmake arguments (default: none; None avoids a
            mutable default)
        @return results dict, or None for a successful build that
            produced no output
        """
        if args is None:
            args = []

        logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))

        cmake_args = []
        cmake_args.extend(args)

        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()

        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT

        if self.cwd:
            kwargs['cwd'] = self.cwd

        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()

        results = {}
        if p.returncode == 0:
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)

            self.instance.status = "passed"
            results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}

            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)
            else:
                return None
        else:
            # A real error occurred: inspect the output to distinguish a
            # flash/RAM overflow (treated as a skip) from a hard failure.
            # Initialize log_msg so an output-less failure cannot hit an
            # undefined name below.
            log_msg = ""
            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)

            if log_msg:
                res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
                if res:
                    logger.debug("Test skipped due to {} Overflow".format(res[0]))
                    self.instance.status = "skipped"
                    self.instance.reason = "{} overflow".format(res[0])
                else:
                    self.instance.status = "error"
                    self.instance.reason = "Build failure"

            results = {
                "returncode": p.returncode,
                "instance": self.instance,
            }

        return results

    def run_cmake(self, args=None):
        """Run the cmake configure step.

        @param args Extra -D definitions (default: none; None avoids a
            mutable default)
        @return results dict; contains 'filter' on success
        """
        if args is None:
            args = []

        if self.warnings_as_errors:
            ldflags = "-Wl,--fatal-warnings"
            cflags = "-Werror"
            aflags = "-Wa,--fatal-warnings"
        else:
            ldflags = cflags = aflags = ""

        logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = [
            f'-B{self.build_dir}',
            f'-S{self.source_dir}',
            f'-DEXTRA_CFLAGS="{cflags}"',
            # Bug fix: the closing quote was missing here, producing an
            # unbalanced -DEXTRA_AFLAGS value on the cmake command line.
            f'-DEXTRA_AFLAGS="{aflags}"',
            f'-DEXTRA_LDFLAGS="{ldflags}"',
            f'-G{self.generator}'
        ]

        if self.cmake_only:
            cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")

        args = ["-D{}".format(a.replace('"', '')) for a in args]
        cmake_args.extend(args)

        cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
        cmake_args.extend(cmake_opts)

        logger.debug("Calling cmake with arguments: {}".format(cmake_args))
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()

        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT

        if self.cwd:
            kwargs['cwd'] = self.cwd

        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()

        if p.returncode == 0:
            filter_results = self.parse_generated()
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            logger.debug(msg)
            results = {'msg': msg, 'filter': filter_results}
        else:
            self.instance.status = "error"
            self.instance.reason = "Cmake build failure"
            logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
            results = {"returncode": p.returncode}

        if out:
            with open(os.path.join(self.build_dir, self.log), "a") as log:
                log_msg = out.decode(sys.getdefaultencoding())
                log.write(log_msg)

        return results
class FilterBuilder(CMake):
    """CMake subclass that runs only the configure step and evaluates the
    testcase's filter expression against the generated configuration."""

    def __init__(self, testcase, platform, source_dir, build_dir):
        super().__init__(testcase, platform, source_dir, build_dir)

        # Filter runs log to their own file, not the main build log.
        self.log = "config-sanitycheck.log"

    def parse_generated(self):
        """Collect .config, the CMake cache and the environment into a
        filter namespace and evaluate the testcase filter against it.

        @return When the testcase has a tc_filter: a dict mapping this
            instance's name to True when it must be filtered out (see its
            consumption in ProjectBuilder.process). Otherwise the raw
            filter_data dict.
        """
        if self.platform.name == "unit_testing":
            return {}

        cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
        defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")

        # Parse the generated Kconfig fragment into a dict.
        with open(defconfig_path, "r") as fp:
            defconfig = {}
            for line in fp.readlines():
                m = self.config_re.match(line)
                if not m:
                    # Warn about lines that are neither comments nor blank
                    # and still don't look like CONFIG_X=... assignments.
                    if line.strip() and not line.startswith("#"):
                        sys.stderr.write("Unrecognized line %s\n" % line)
                    continue
                defconfig[m.group(1)] = m.group(2).strip()

        self.defconfig = defconfig

        cmake_conf = {}
        try:
            cache = CMakeCache.from_file(cmake_cache_path)
        except FileNotFoundError:
            cache = {}

        for k in iter(cache):
            cmake_conf[k.name] = k.value

        self.cmake_cache = cmake_conf

        # Later updates win: environment, then .config, then CMake cache.
        filter_data = {
            "ARCH": self.platform.arch,
            "PLATFORM": self.platform.name
        }
        filter_data.update(os.environ)
        filter_data.update(self.defconfig)
        filter_data.update(self.cmake_cache)

        # Devicetree info (when present) is made available to the filter.
        edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
        if self.testcase and self.testcase.tc_filter:
            try:
                if os.path.exists(edt_pickle):
                    with open(edt_pickle, 'rb') as f:
                        edt = pickle.load(f)
                else:
                    edt = None
                res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)

            except (ValueError, SyntaxError) as se:
                sys.stderr.write(
                    "Failed processing %s\n" % self.testcase.yamlfile)
                raise se

            # A falsy filter result means "filter this instance out".
            if not res:
                return {os.path.join(self.platform.name, self.testcase.name): True}
            else:
                return {os.path.join(self.platform.name, self.testcase.name): False}
        else:
            self.platform.filter_data = filter_data
            return filter_data
class ProjectBuilder(FilterBuilder):
    """Drive one TestInstance through the cmake -> build -> run -> report
    pipeline.

    Each pipeline message handled by process() performs one stage and posts
    the follow-up message back onto the global ``pipeline`` queue.
    """
    def __init__(self, suite, instance, **kwargs):
        # suite: the owning TestSuite (used for shared counters/reporting).
        # instance: the TestInstance this builder is responsible for.
        super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
        self.log = "build.log"
        self.instance = instance
        self.suite = suite
        self.filtered_tests = 0
        # Options forwarded from the command-line driver.
        self.lsan = kwargs.get('lsan', False)
        self.asan = kwargs.get('asan', False)
        self.ubsan = kwargs.get('ubsan', False)
        self.valgrind = kwargs.get('valgrind', False)
        self.extra_args = kwargs.get('extra_args', [])
        self.device_testing = kwargs.get('device_testing', False)
        self.cmake_only = kwargs.get('cmake_only', False)
        self.cleanup = kwargs.get('cleanup', False)
        self.coverage = kwargs.get('coverage', False)
        self.inline_logs = kwargs.get('inline_logs', False)
        self.generator = kwargs.get('generator', None)
        self.generator_cmd = kwargs.get('generator_cmd', None)
        self.verbose = kwargs.get('verbose', None)
        self.warnings_as_errors = kwargs.get('warnings_as_errors', True)

    @staticmethod
    def log_info(filename, inline_logs):
        """Show a log file to the user: dump its contents when inline_logs
        is set, otherwise just print its (resolved) path."""
        # Resolve symlinks so we print the real on-disk location.
        filename = os.path.abspath(os.path.realpath(filename))
        if inline_logs:
            logger.info("{:-^100}".format(filename))
            try:
                with open(filename) as fp:
                    data = fp.read()
            except Exception as e:
                data = "Unable to read log data (%s)\n" % (str(e))
            # logger.error is used (not info) so the content is visible even
            # at a quiet log level.
            logger.error(data)
            logger.info("{:-^100}".format(filename))
        else:
            logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)

    def log_info_file(self, inline_logs):
        """Pick the most relevant log for this instance (valgrind, handler,
        device, falling back to build) and pass it to log_info()."""
        build_dir = self.instance.build_dir
        h_log = "{}/handler.log".format(build_dir)
        b_log = "{}/build.log".format(build_dir)
        v_log = "{}/valgrind.log".format(build_dir)
        d_log = "{}/device.log".format(build_dir)
        if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
            self.log_info("{}".format(v_log), inline_logs)
        elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
            self.log_info("{}".format(h_log), inline_logs)
        elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
            self.log_info("{}".format(d_log), inline_logs)
        else:
            self.log_info("{}".format(b_log), inline_logs)

    def setup_handler(self):
        """Attach the appropriate execution Handler to the instance based on
        platform simulation type, test type, or device-testing mode.

        Leaves instance.handler unset when no execution method applies
        (build-only configurations)."""
        instance = self.instance
        args = []

        # FIXME: Needs simplification
        if instance.platform.simulation == "qemu":
            instance.handler = QEMUHandler(instance, "qemu")
            args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
            instance.handler.call_make_run = True
        elif instance.testcase.type == "unit":
            instance.handler = BinaryHandler(instance, "unit")
            instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
            if self.coverage:
                args.append("COVERAGE=1")
        elif instance.platform.type == "native":
            handler = BinaryHandler(instance, "native")
            handler.asan = self.asan
            handler.valgrind = self.valgrind
            handler.lsan = self.lsan
            handler.ubsan = self.ubsan
            handler.coverage = self.coverage
            handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            instance.handler = handler
        elif instance.platform.simulation == "nsim":
            # Only usable when the simulator binary is on PATH.
            if find_executable("nsimdrv"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "renode":
            if find_executable("renode"):
                instance.handler = BinaryHandler(instance, "renode")
                instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
                instance.handler.call_make_run = True
        elif self.device_testing:
            instance.handler = DeviceHandler(instance, "device")

        if instance.handler:
            instance.handler.args = args
            instance.handler.generator_cmd = self.generator_cmd
            instance.handler.generator = self.generator

    def process(self, message):
        """Handle one pipeline work item ('op' selects the stage); each
        stage posts its successor back onto the global ``pipeline`` queue."""
        op = message.get('op')

        if not self.instance.handler:
            self.setup_handler()

        # The build process, call cmake and build with configured generator
        if op == "cmake":
            results = self.cmake()
            if self.instance.status in ["failed", "error"]:
                pipeline.put({"op": "report", "test": self.instance})
            elif self.cmake_only:
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # NOTE(review): assumes a successful cmake() always returns a
                # dict with a 'filter' key -- confirm in FilterBuilder.
                if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
                    logger.debug("filtering %s" % self.instance.name)
                    self.instance.status = "skipped"
                    self.instance.reason = "filter"
                    self.suite.build_filtered_tests += 1
                    for case in self.instance.testcase.cases:
                        self.instance.results.update({case: 'SKIP'})
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "build", "test": self.instance})

        elif op == "build":
            logger.debug("build test: %s" % self.instance.name)
            results = self.build()
            if not results:
                self.instance.status = "error"
                self.instance.reason = "Build Failure"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                if results.get('returncode', 1) > 0:
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    if self.instance.run:
                        pipeline.put({"op": "run", "test": self.instance})
                    else:
                        pipeline.put({"op": "report", "test": self.instance})
        # Run the generated binary using one of the supported handlers
        elif op == "run":
            logger.debug("run test: %s" % self.instance.name)
            self.run()
            self.instance.status, _ = self.instance.handler.get_state()
            logger.debug(f"run status: {self.instance.status}")
            pipeline.put({
                "op": "report",
                "test": self.instance,
                "state": "executed",
                "status": self.instance.status,
                "reason": self.instance.reason}
            )

        # Report results and output progress to screen
        elif op == "report":
            # report_lock serializes console/report output across workers.
            with report_lock:
                self.report_out()

            if self.cleanup and not self.coverage and self.instance.status == "passed":
                pipeline.put({
                    "op": "cleanup",
                    "test": self.instance
                })

        elif op == "cleanup":
            self.cleanup_artifacts()

    def cleanup_artifacts(self):
        """Delete build artifacts for this instance, keeping only the small
        allow-listed files (config and logs), and prune emptied dirs."""
        logger.debug("Cleaning up {}".format(self.instance.build_dir))
        allow = [
            'zephyr/.config',
            'handler.log',
            'build.log',
            'device.log',
            'recording.csv',
            ]
        allow = [os.path.join(self.instance.build_dir, file) for file in allow]

        # topdown=False so directories are visited after their contents,
        # letting the empty-dir removal below succeed.
        for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if path not in allow:
                    os.remove(path)
            # Remove empty directories and symbolic links to directories
            for dir in dirnames:
                path = os.path.join(dirpath, dir)
                if os.path.islink(path):
                    os.remove(path)
                elif not os.listdir(path):
                    os.rmdir(path)

    def report_out(self):
        """Update suite counters for this instance and print either a
        verbose per-test line or a one-line progress bar."""
        total_tests_width = len(str(self.suite.total_to_do))
        self.suite.total_done += 1
        instance = self.instance

        if instance.status in ["error", "failed", "timeout"]:
            if instance.status == "error":
                self.suite.total_errors += 1
            self.suite.total_failed += 1
            if self.verbose:
                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
            else:
                print("")
                logger.error(
                    "{:<25} {:<50} {}FAILED{}: {}".format(
                        instance.platform.name,
                        instance.testcase.name,
                        Fore.RED,
                        Fore.RESET,
                        instance.reason))
                if not self.verbose:
                    self.log_info_file(self.inline_logs)
        elif instance.status == "skipped":
            status = Fore.YELLOW + "SKIPPED" + Fore.RESET
        elif instance.status == "passed":
            status = Fore.GREEN + "PASSED" + Fore.RESET
        else:
            logger.debug(f"Unknown status = {instance.status}")
            status = Fore.YELLOW + "UNKNOWN" + Fore.RESET

        if self.verbose:
            if self.cmake_only:
                more_info = "cmake"
            elif instance.status == "skipped":
                more_info = instance.reason
            else:
                if instance.handler and instance.run:
                    more_info = instance.handler.type_str
                    htime = instance.handler.duration
                    if htime:
                        more_info += " {:.3f}s".format(htime)
                else:
                    more_info = "build"

            logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
                self.suite.total_done, total_tests_width, self.suite.total_to_do, instance.platform.name,
                instance.testcase.name, status, more_info))

            if instance.status in ["error", "failed", "timeout"]:
                self.log_info_file(self.inline_logs)
        else:
            # Non-verbose: overwrite a single progress line in place.
            sys.stdout.write("\rINFO    - Total complete: %s%4d/%4d%s  %2d%%  skipped: %s%4d%s, failed: %s%4d%s" % (
                Fore.GREEN,
                self.suite.total_done,
                self.suite.total_to_do,
                Fore.RESET,
                int((float(self.suite.total_done) / self.suite.total_to_do) * 100),
                Fore.YELLOW if self.suite.build_filtered_tests > 0 else Fore.RESET,
                self.suite.build_filtered_tests,
                Fore.RESET,
                Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
                self.suite.total_failed,
                Fore.RESET
                )
            )
        sys.stdout.flush()

    def cmake(self):
        """Assemble extra cmake args (merging all OVERLAY_CONFIG= fragments
        into one variable) and run the cmake stage; returns its results."""
        instance = self.instance
        args = self.testcase.extra_args[:]
        args += self.extra_args

        if instance.handler:
            args += instance.handler.args

        # merge overlay files into one variable
        def extract_overlays(args):
            re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
            other_args = []
            overlays = []
            for arg in args:
                match = re_overlay.search(arg)
                if match:
                    overlays.append(match.group(1).strip('\'"'))
                else:
                    other_args.append(arg)

            # Mutates the caller's list: strips OVERLAY_CONFIG entries.
            args[:] = other_args
            return overlays

        overlays = extract_overlays(args)

        # These options are applied via a generated extra conf fragment.
        if (self.testcase.extra_configs or self.coverage or
                self.asan or self.ubsan):
            overlays.append(os.path.join(instance.build_dir,
                                         "sanitycheck", "testcase_extra.conf"))

        if overlays:
            args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))

        results = self.run_cmake(args)
        return results

    def build(self):
        """Run the generator build step for this instance's build dir."""
        results = self.run_build(['--build', self.build_dir])
        return results

    def run(self):
        """Execute the built test via the instance's handler (blocking)."""
        instance = self.instance

        if instance.handler.type_str == "device":
            # Device handler needs the suite to pick a connected board.
            instance.handler.suite = self.suite

        instance.handler.handle()

        sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.

    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool

    Any additional keyword arguments (e.g. ``thread_name_prefix``) are
    forwarded to ThreadPoolExecutor.
    """

    def __init__(self, bound, max_workers, **kwargs):
        # Bug fix: the original accepted **kwargs but silently dropped them;
        # forward them to ThreadPoolExecutor so options like
        # thread_name_prefix actually take effect.
        super().__init__(max_workers, **kwargs)
        # Allow `bound` queued items on top of the workers' in-flight items.
        self.semaphore = BoundedSemaphore(bound + max_workers)

    def submit(self, fn, *args, **kwargs):
        """Submit fn(*args, **kwargs); blocks while the queue is full.

        The semaphore slot is released only when the work item completes,
        which is what bounds the number of outstanding items.
        """
        self.semaphore.acquire()
        try:
            future = super().submit(fn, *args, **kwargs)
        except Exception:
            # Submission failed (e.g. executor already shut down): give the
            # reserved slot back before propagating.
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
        return future
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_skipped_cases = 0 # number of skipped test cases
self.total_to_do = 0 # number of test instances to be run
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.build_filtered_tests = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
# run integration tests only
self.integration = False
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self):
self.total_tests = len(self.instances)
self.total_cases = 0
self.total_skipped = 0
self.total_skipped_cases = 0
self.total_passed = 0
for instance in self.instances.values():
self.total_cases += len(instance.testcase.cases)
if instance.status == 'skipped':
self.total_skipped += 1
self.total_skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
self.total_passed += 1
for res in instance.results.values():
if res == 'SKIP':
self.total_skipped_cases += 1
self.total_to_do = self.total_tests - self.total_skipped
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
    def footprint_reports(self, report, show_footprint, all_deltas,
                          footprint_threshold, last_metrics):
        """Log RAM/ROM footprint deltas against the metrics CSV *report*.

        With all_deltas set, every change is reported as INFO; otherwise
        only regressions above footprint_threshold percent are reported as
        WARNING.  last_metrics only affects the trailing summary wording.
        """
        if not report:
            return

        logger.debug("running footprint_reports")
        deltas = self.compare_metrics(report)
        warnings = 0
        if deltas and show_footprint:
            for i, metric, value, delta, lower_better in deltas:
                # Unless all deltas are requested, skip improvements.
                if not all_deltas and ((delta < 0 and lower_better) or
                                       (delta > 0 and not lower_better)):
                    continue

                percentage = 0
                if value > delta:
                    percentage = (float(delta) / float(value - delta))

                # Skip changes below the configured threshold.
                if not all_deltas and (percentage < (footprint_threshold / 100.0)):
                    continue

                logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
                    i.platform.name, i.testcase.name, Fore.YELLOW,
                    "INFO" if all_deltas else "WARNING", Fore.RESET,
                    metric, delta, value, percentage))
                warnings += 1

        if warnings:
            logger.warning("Deltas based on metrics from last %s" %
                           ("release" if not last_metrics else "run"))
    def summary(self, unrecognized_sections):
        """Print the final pass/fail summary for the whole run.

        Counts failures (including instances with unrecognized binary
        sections unless unrecognized_sections is allowed) and how many
        instances actually executed, then logs the aggregate totals.
        """
        failed = 0
        run = 0
        for instance in self.instances.values():
            if instance.status == "failed":
                failed += 1
            elif instance.metrics.get("unrecognized") and not unrecognized_sections:
                logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                             (Fore.RED, Fore.RESET, instance.name,
                              str(instance.metrics.get("unrecognized", []))))
                failed += 1

            # A non-zero handler_time means the test actually executed.
            if instance.metrics.get('handler_time', None):
                run += 1

        # Guard against division by zero when everything was skipped.
        if self.total_tests and self.total_tests != self.total_skipped:
            pass_rate = (float(self.total_passed) / float(
                self.total_tests - self.total_skipped))
        else:
            pass_rate = 0

        logger.info(
            "{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
                Fore.RED if failed else Fore.GREEN,
                self.total_passed,
                self.total_tests - self.total_skipped,
                Fore.RESET,
                pass_rate,
                Fore.RED if self.total_failed else Fore.RESET,
                self.total_failed,
                Fore.RESET,
                self.total_skipped,
                Fore.YELLOW if self.warnings else Fore.RESET,
                self.warnings,
                Fore.RESET,
                self.duration))

        self.total_platforms = len(self.platforms)
        if self.platforms:
            logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
                self.total_cases - self.total_skipped_cases,
                len(self.selected_platforms),
                self.total_platforms,
                (100 * len(self.selected_platforms) / len(self.platforms))
            ))

        logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run - self.total_skipped}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
    def apply_filters(self, **kwargs):
        """Build the instance list for the run and record discards.

        For every (testcase, platform) pair, create a TestInstance and run
        it through the command-line and per-testcase filter cascade; pairs
        that fail a filter are recorded in the returned ``discards`` dict
        (first matching reason wins via discards.get()).  Surviving
        instances are registered with add_instances() and discarded ones
        are marked "skipped".
        """
        toolchain = self.get_toolchain()

        discards = {}
        platform_filter = kwargs.get('platform')
        exclude_platform = kwargs.get('exclude_platform', [])
        testcase_filter = kwargs.get('run_individual_tests', [])
        arch_filter = kwargs.get('arch')
        tag_filter = kwargs.get('tag')
        exclude_tag = kwargs.get('exclude_tag')
        all_filter = kwargs.get('all')
        device_testing_filter = kwargs.get('device_testing')
        force_toolchain = kwargs.get('force_toolchain')
        force_platform = kwargs.get('force_platform')
        emu_filter = kwargs.get('emulation_only')

        logger.debug("platform filter: " + str(platform_filter))
        logger.debug("    arch_filter: " + str(arch_filter))
        logger.debug("     tag_filter: " + str(tag_filter))
        logger.debug("    exclude_tag: " + str(exclude_tag))

        default_platforms = False
        emulation_platforms = False

        if platform_filter:
            platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
        elif emu_filter:
            platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
        else:
            platforms = self.platforms

        if all_filter:
            logger.info("Selecting all possible platforms per test case")
            # When --all used, any --platform arguments ignored
            platform_filter = []
        elif not platform_filter and not emu_filter:
            logger.info("Selecting default platforms per test case")
            default_platforms = True
        elif emu_filter:
            logger.info("Selecting emulation platforms per test case")
            emulation_platforms = True

        logger.info("Building initial testcase list...")

        for tc_name, tc in self.testcases.items():
            # list of instances per testcase, aka configurations.
            instance_list = []
            for plat in platforms:
                instance = TestInstance(tc, plat, self.outdir)
                instance.check_build_or_run(
                    self.build_only,
                    self.enable_slow,
                    self.device_testing,
                    self.fixtures
                )
                for t in tc.cases:
                    instance.results[t] = None

                # A connected board providing the required fixture makes a
                # build-only instance runnable after all.
                if device_testing_filter:
                    for h in self.connected_hardware:
                        if h['platform'] == plat.name:
                            if tc.harness_config.get('fixture') in h.get('fixtures', []):
                                instance.build_only = False
                                instance.run = True

                if not force_platform and plat.name in exclude_platform:
                    discards[instance] = discards.get(instance, "Platform is excluded on command line.")

                if (plat.arch == "unit") != (tc.type == "unit"):
                    # Discard silently
                    continue

                if device_testing_filter and instance.build_only:
                    discards[instance] = discards.get(instance, "Not runnable on device")

                if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
                    discards[instance] = discards.get(instance, "Not part of integration platforms")

                if tc.skip:
                    discards[instance] = discards.get(instance, "Skip filter")

                # build_on_all overrides any platform filter for this tc.
                if tc.build_on_all and not platform_filter:
                    platform_filter = []

                if tag_filter and not tc.tags.intersection(tag_filter):
                    discards[instance] = discards.get(instance, "Command line testcase tag filter")

                if exclude_tag and tc.tags.intersection(exclude_tag):
                    discards[instance] = discards.get(instance, "Command line testcase exclude filter")

                if testcase_filter and tc_name not in testcase_filter:
                    discards[instance] = discards.get(instance, "Testcase name filter")

                if arch_filter and plat.arch not in arch_filter:
                    discards[instance] = discards.get(instance, "Command line testcase arch filter")

                if not force_platform:

                    if tc.arch_allow and plat.arch not in tc.arch_allow:
                        discards[instance] = discards.get(instance, "Not in test case arch allow list")

                    if tc.arch_exclude and plat.arch in tc.arch_exclude:
                        discards[instance] = discards.get(instance, "In test case arch exclude")

                    if tc.platform_exclude and plat.name in tc.platform_exclude:
                        discards[instance] = discards.get(instance, "In test case platform exclude")

                if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
                    discards[instance] = discards.get(instance, "In test case toolchain exclude")

                if platform_filter and plat.name not in platform_filter:
                    discards[instance] = discards.get(instance, "Command line platform filter")

                if tc.platform_allow and plat.name not in tc.platform_allow:
                    discards[instance] = discards.get(instance, "Not in testcase platform allow list")

                if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
                    discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")

                if not plat.env_satisfied:
                    discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))

                if not force_toolchain \
                        and toolchain and (toolchain not in plat.supported_toolchains) \
                        and tc.type != 'unit':
                    discards[instance] = discards.get(instance, "Not supported by the toolchain")

                if plat.ram < tc.min_ram:
                    discards[instance] = discards.get(instance, "Not enough RAM")

                if tc.depends_on:
                    dep_intersection = tc.depends_on.intersection(set(plat.supported))
                    if dep_intersection != set(tc.depends_on):
                        discards[instance] = discards.get(instance, "No hardware support")

                if plat.flash < tc.min_flash:
                    discards[instance] = discards.get(instance, "Not enough FLASH")

                if set(plat.ignore_tags) & tc.tags:
                    discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")

                if plat.only_tags and not set(plat.only_tags) & tc.tags:
                    discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")

                # if nothing stopped us until now, it means this configuration
                # needs to be added.
                instance_list.append(instance)

            # no configurations, so jump to next testcase
            if not instance_list:
                continue

            # if sanitycheck was launched with no platform options at all, we
            # take all default platforms
            if default_platforms and not tc.build_on_all:
                if tc.platform_allow:
                    a = set(self.default_platforms)
                    b = set(tc.platform_allow)
                    c = a.intersection(b)
                    if c:
                        aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
                        self.add_instances(aa)
                    else:
                        # No default platform is allowed: keep just one
                        # arbitrary allowed configuration.
                        self.add_instances(instance_list[:1])
                else:
                    instances = list(filter(lambda tc: tc.platform.default, instance_list))
                    self.add_instances(instances)

                for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
                    discards[instance] = discards.get(instance, "Not a default test platform")
            elif emulation_platforms:
                self.add_instances(instance_list)
                # NOTE(review): `not inst.platform.simulation != 'na'` is a
                # double negation equivalent to `simulation == 'na'` --
                # likely intended, but confirm.
                for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)):
                    discards[instance] = discards.get(instance, "Not an emulated platform")
            else:
                self.add_instances(instance_list)

        for _, case in self.instances.items():
            case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)

        self.discards = discards
        self.selected_platforms = set(p.platform.name for p in self.instances.values())

        # Mark every discarded instance as skipped with its reason.
        for instance in self.discards:
            instance.reason = self.discards[instance]
            instance.status = "skipped"
            instance.fill_results_by_status()

        return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
    def execute(self):
        """Run the whole suite: feed the pipeline, process work items with a
        bounded thread pool, then compute size metrics.

        NOTE(review): reads `self.jobs`, `self.test_only`, `self.verbose`
        and `self.enable_size_report`, none of which are set in __init__
        (`enable_sizes_report`, plural, is) -- presumably assigned by the
        CLI driver before calling execute(); confirm.
        """

        def calc_one_elf_size(instance):
            # Fill in RAM/ROM footprint metrics for one built instance.
            if instance.status not in ["error", "failed", "skipped"]:
                if instance.platform.type != "native":
                    size_calc = instance.calculate_sizes()
                    instance.metrics["ram_size"] = size_calc.get_ram_size()
                    instance.metrics["rom_size"] = size_calc.get_rom_size()
                    instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
                else:
                    # Native binaries have no meaningful footprint.
                    instance.metrics["ram_size"] = 0
                    instance.metrics["rom_size"] = 0
                    instance.metrics["unrecognized"] = []

                instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0

        logger.info("Adding tasks to the queue...")

        # We can use a with statement to ensure threads are cleaned up promptly
        with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:

            # start a future for a thread which sends work in through the queue
            future_to_test = {
                executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}

            while future_to_test:
                # check for status of the futures which are currently working
                done, pending = concurrent.futures.wait(future_to_test, timeout=1,
                                                        return_when=concurrent.futures.FIRST_COMPLETED)

                # if there is incoming work, start a new future
                while not pipeline.empty():
                    # fetch a url from the queue
                    message = pipeline.get()
                    test = message['test']
                    pb = ProjectBuilder(self,
                                        test,
                                        lsan=self.enable_lsan,
                                        asan=self.enable_asan,
                                        ubsan=self.enable_ubsan,
                                        coverage=self.enable_coverage,
                                        extra_args=self.extra_args,
                                        device_testing=self.device_testing,
                                        cmake_only=self.cmake_only,
                                        cleanup=self.cleanup,
                                        valgrind=self.enable_valgrind,
                                        inline_logs=self.inline_logs,
                                        generator=self.generator,
                                        generator_cmd=self.generator_cmd,
                                        verbose=self.verbose,
                                        warnings_as_errors=self.warnings_as_errors
                                        )
                    future_to_test[executor.submit(pb.process, message)] = test.name

                # process any completed futures
                for future in done:
                    test = future_to_test[future]
                    try:
                        data = future.result()
                    except Exception as exc:
                        # A worker crash is fatal for the whole run.
                        logger.error('%r generated an exception:' % (test,))
                        for line in traceback.format_exc().splitlines():
                            logger.error(line)
                        sys.exit('%r generated an exception: %s' % (test, exc))

                    else:
                        if data:
                            logger.debug(data)

                    # remove the now completed future
                    del future_to_test[future]

                for future in pending:
                    test = future_to_test[future]
                    try:
                        future.result(timeout=180)
                    except concurrent.futures.TimeoutError:
                        logger.warning("{} stuck?".format(test))

        if self.enable_size_report and not self.cmake_only:
            # Parallelize size calculation
            executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
            futures = [executor.submit(calc_one_elf_size, instance)
                       for instance in self.instances.values()]
            concurrent.futures.wait(futures)
        else:
            for instance in self.instances.values():
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
                instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
    def xunit_report(self, filename, platform=None, full_report=False, append=False):
        """Write test results as a JUnit-style XML report.

        :param filename: XML report path; updated in place when *append*
            is True and the file already exists.
        :param platform: restrict the report to one platform; otherwise
            every platform in ``self.selected_platforms`` is reported.
        :param full_report: when True, emit one <testcase> element per
            individual test-case result (instance.results); otherwise one
            element per test instance.
        :param append: merge into an existing report, replacing entries
            for re-run testcases.
        :return: tuple ``(fails, passes, errors, skips)``.
            NOTE(review): these counters reflect only the *last* platform
            iterated (they are reset inside the loop) and are unbound if
            no platform produced results — confirm callers rely on this
            only loosely.
        """
        total = 0
        if platform:
            selected = [platform]
        else:
            selected = self.selected_platforms

        # Reuse the existing XML tree when appending, else start fresh.
        if os.path.exists(filename) and append:
            tree = ET.parse(filename)
            eleTestsuites = tree.getroot()
        else:
            eleTestsuites = ET.Element('testsuites')

        for p in selected:
            inst = self.get_platform_instances(p)
            fails = 0
            passes = 0
            errors = 0
            skips = 0
            duration = 0

            # First pass: accumulate counters and total duration for the
            # <testsuite> attributes.
            for _, instance in inst.items():
                handler_time = instance.metrics.get('handler_time', 0)
                duration += handler_time
                if full_report and not instance.build_only:
                    # Count each individual test-case verdict.
                    for k in instance.results.keys():
                        if instance.results[k] == 'PASS':
                            passes += 1
                        elif instance.results[k] == 'BLOCK':
                            errors += 1
                        elif instance.results[k] == 'SKIP':
                            skips += 1
                        else:
                            fails += 1
                else:
                    # Count per-instance status.
                    if instance.status in ["error", "failed", "timeout"]:
                        if instance.reason in ['build_error', 'handler_crash']:
                            errors += 1
                        else:
                            fails += 1
                    elif instance.status == 'skipped':
                        skips += 1
                    elif instance.status == 'passed':
                        passes += 1
                    else:
                        logger.error(f"Unknown status {instance.status}")

            total = (errors + passes + fails + skips)
            # do not produce a report if no tests were actually run (only built)
            if total == 0:
                continue

            run = p
            eleTestsuite = None

            # When we re-run the tests, we re-use the results and update only with
            # the newly run tests.
            if os.path.exists(filename) and append:
                ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
                if ts:
                    # Update counters on the existing suite element.
                    eleTestsuite = ts[0]
                    eleTestsuite.attrib['failures'] = "%d" % fails
                    eleTestsuite.attrib['errors'] = "%d" % errors
                    eleTestsuite.attrib['skipped'] = "%d" % skips
                else:
                    logger.info(f"Did not find any existing results for {p}")
                    eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                                 name=run, time="%f" % duration,
                                                 tests="%d" % (total),
                                                 failures="%d" % fails,
                                                 errors="%d" % (errors), skipped="%s" % (skips))
            else:
                eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                             name=run, time="%f" % duration,
                                             tests="%d" % (total),
                                             failures="%d" % fails,
                                             errors="%d" % (errors), skipped="%s" % (skips))

            # Second pass: emit one <testcase> element per result.
            for _, instance in inst.items():
                if full_report:
                    tname = os.path.basename(instance.testcase.name)
                else:
                    tname = instance.testcase.id
                handler_time = instance.metrics.get('handler_time', 0)

                if full_report:
                    for k in instance.results.keys():
                        # remove testcases that are being re-run from exiting reports
                        for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
                            eleTestsuite.remove(tc)

                        classname = ".".join(tname.split(".")[:2])
                        eleTestcase = ET.SubElement(
                            eleTestsuite, 'testcase',
                            classname=classname,
                            name="%s" % (k), time="%f" % handler_time)
                        if instance.results[k] in ['FAIL', 'BLOCK'] or \
                            (instance.build_only and instance.status in ["error", "failed", "timeout"]):
                            if instance.results[k] == 'FAIL':
                                el = ET.SubElement(
                                    eleTestcase,
                                    'failure',
                                    type="failure",
                                    message="failed")
                            else:
                                el = ET.SubElement(
                                    eleTestcase,
                                    'error',
                                    type="failure",
                                    message="failed")
                            # NOTE(review): 'p' (the platform name in this
                            # loop) is shadowed here by a path; harmless in
                            # the full_report branch, but fragile.
                            p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                            log_file = os.path.join(p, "handler.log")
                            el.text = self.process_log(log_file)

                        elif instance.results[k] == 'PASS' \
                            or (instance.build_only and instance.status in ["passed"]):
                            pass
                        elif instance.results[k] == 'SKIP' \
                            or (instance.build_only and instance.status in ["skipped"]):
                            el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
                        else:
                            el = ET.SubElement(
                                eleTestcase,
                                'error',
                                type="failure",
                                message=f"{instance.reason}")
                else:
                    # Compact (per-instance) report: classname encodes the
                    # platform when reporting several platforms at once.
                    if platform:
                        classname = ".".join(instance.testcase.name.split(".")[:2])
                    else:
                        classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

                    # remove testcases that are being re-run from exiting reports
                    for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
                        eleTestsuite.remove(tc)

                    eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                        classname=classname,
                        name="%s" % (instance.testcase.name),
                        time="%f" % handler_time)

                    if instance.status in ["error", "failed", "timeout"]:
                        failure = ET.SubElement(
                            eleTestcase,
                            'failure',
                            type="failure",
                            message=instance.reason)

                        p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                        bl = os.path.join(p, "build.log")
                        hl = os.path.join(p, "handler.log")
                        log_file = bl
                        # Prefer the handler log unless this was a build
                        # failure (then only build.log is meaningful).
                        if instance.reason != 'Build error':
                            if os.path.exists(hl):
                                log_file = hl
                            else:
                                log_file = bl

                        failure.text = self.process_log(log_file)

                    elif instance.status == "skipped":
                        ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")

        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

        return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
    """ Base class for every supported coverage tool
    """

    def __init__(self):
        # Path of the gcov executable matching the toolchain; set by the caller.
        self.gcov_tool = None
        # Source tree root used by subclasses to resolve report paths.
        self.base_dir = None

    @staticmethod
    def factory(tool):
        """Return a coverage-tool instance for *tool* ('lcov' or 'gcovr'),
        or None (after logging an error) for an unknown name."""
        if tool == 'lcov':
            t = Lcov()
        elif tool == 'gcovr':
            t = Gcovr()
        else:
            logger.error("Unsupported coverage tool specified: {}".format(tool))
            return None

        return t

    @staticmethod
    def retrieve_gcov_data(intput_file):
        """Extract a GCOV coverage dump from a console log.

        Scans *intput_file* (sic — parameter name kept for compatibility)
        for the text between GCOV_COVERAGE_DUMP_START and
        GCOV_COVERAGE_DUMP_END markers.  Dump lines look like
        ``*<filename><<hexdump>``.

        :return: ``{'complete': bool, 'data': {filename: hexdump}}``.
            'complete' is True when the END marker was seen, or when the
            START marker never appeared at all (nothing to capture).
        """
        logger.debug("Working on %s" % intput_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(intput_file, 'r') as fp:
            for line in fp.readlines():
                if re.search("GCOV_COVERAGE_DUMP_START", line):
                    capture_data = True
                    continue
                if re.search("GCOV_COVERAGE_DUMP_END", line):
                    capture_complete = True
                    break
                # Loop until the coverage data is found.
                if not capture_data:
                    continue
                if line.startswith("*"):
                    sp = line.split("<")
                    if len(sp) > 1:
                        # Remove the leading delimiter "*"
                        file_name = sp[0][1:]
                        # Remove the trailing new line char
                        hex_dump = sp[1][:-1]
                    else:
                        continue
                else:
                    continue
                extracted_coverage_info.update({file_name: hex_dump})
        # No START marker at all means there was nothing to capture, which
        # is treated as a complete (empty) capture.
        if not capture_data:
            capture_complete = True
        return {'complete': capture_complete, 'data': extracted_coverage_info}

    @staticmethod
    def create_gcda_files(extracted_coverage_info):
        """Write each hexdump from retrieve_gcov_data() out as a binary
        .gcda file next to the sources (keys are absolute file paths)."""
        logger.debug("Generating gcda files")
        for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage gcovr fails
            # hence skipping it problem only in gcovr v4.1
            if "kobject_hash" in filename:
                filename = (filename[:-4]) + "gcno"
                try:
                    os.remove(filename)
                except Exception:
                    pass
                continue

            with open(filename, 'wb') as fp:
                fp.write(bytes.fromhex(hexdump_val))

    def generate(self, outdir):
        """Harvest coverage dumps from every handler.log under *outdir*,
        materialize the .gcda files, then run the tool-specific
        ``_generate`` (implemented by subclasses) to build the report."""
        for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
            gcov_data = self.__class__.retrieve_gcov_data(filename)
            capture_complete = gcov_data['complete']
            extracted_coverage_info = gcov_data['data']
            if capture_complete:
                self.__class__.create_gcda_files(extracted_coverage_info)
                logger.debug("Gcov data captured: {}".format(filename))
            else:
                logger.error("Gcov data capture incomplete: {}".format(filename))

        with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
            ret = self._generate(outdir, coveragelog)
            if ret == 0:
                logger.info("HTML report generated: {}".format(
                    os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
    """Coverage report generation backed by the lcov/genhtml tools."""

    def __init__(self):
        super().__init__()
        # Shell-wildcard patterns passed to `lcov --remove`.
        self.ignores = []

    def add_ignore_file(self, pattern):
        # lcov --remove matches shell wildcards, so surround with '*'.
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        self.ignores.append(pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Capture coverage with lcov and render HTML with genhtml.

        All tool stdout is appended to *coveragelog*; returns the exit
        status of the final genhtml invocation.
        """
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")
        # Capture raw coverage data from the build/run output directory.
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
                         "--capture", "--directory", outdir,
                         "--rc", "lcov_branch_coverage=1",
                         "--output-file", coveragefile], stdout=coveragelog)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        # Strip every user-requested ignore pattern from the main tracefile.
        for i in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, i, "--output-file",
                 coveragefile, "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        # NOTE(review): "-output-directory" uses a single dash; genhtml
        # appears to accept it, but confirm before normalizing.
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)
class Gcovr(CoverageTool):
    """Coverage report generation backed by the gcovr tool.

    Produces JSON tracefiles with gcovr, then merges them into an HTML
    report under ``<outdir>/coverage``.
    """

    def __init__(self):
        super().__init__()
        # Regex patterns passed to `gcovr -e` (gcovr filters are regexes).
        self.ignores = []

    def add_ignore_file(self, pattern):
        # gcovr exclusion filters are regular expressions, so anchor the
        # pattern loosely with '.*' on both sides.
        self.ignores.append('.*' + pattern + '.*')

    def add_ignore_directory(self, pattern):
        self.ignores.append(pattern + '/.*')

    @staticmethod
    def _interleave_list(prefix, items):
        """Return ``[prefix, items[0], prefix, items[1], ...]``.

        Used to build repeated command-line options such as
        ``-e PAT1 -e PAT2``.  The parameter was renamed from ``list``
        to avoid shadowing the builtin (positional callers unaffected).
        """
        pairs = [(prefix, item) for item in items]
        return [element for pair in pairs for element in pair]

    def _generate(self, outdir, coveragelog):
        """Run gcovr over *outdir* and write the HTML report.

        All tool stdout is appended to *coveragelog*; returns the exit
        status of the final gcovr invocation.
        """
        coveragefile = os.path.join(outdir, "coverage.json")
        ztestfile = os.path.join(outdir, "ztest.json")

        excludes = Gcovr._interleave_list("-e", self.ignores)

        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-e", "tests/*"] + excludes +
                        ["--json", "-o", coveragefile, outdir],
                        stdout=coveragelog)

        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-f", "tests/ztest", "-e",
                         "tests/ztest/test/*", "--json", "-o", ztestfile,
                         outdir], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        subdir = os.path.join(outdir, "coverage")
        os.makedirs(subdir, exist_ok=True)

        tracefiles = self._interleave_list("--add-tracefile", files)

        return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
                                "--html-details"] + tracefiles +
                               ["-o", os.path.join(subdir, "index.html")],
                               stdout=coveragelog)
class HardwareMap:
    """Discovery and persistence of connected development boards.

    Scans USB serial adapters, maps them to flash/debug runners, and
    reads/writes the YAML hardware-map file used for on-device testing.
    """

    # Schema used to validate a loaded hardware-map YAML file.
    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")

    # USB manufacturer strings of devices we recognize as debug probes.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Maps a runner name to the USB product strings (exact names or
    # regexes) of the probes it supports.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*', 'STLINK-V3'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        # Devices found by scan_hw() in this run.
        self.detected = []
        # Devices loaded from a map file or added from the command line.
        self.connected_hardware = []

    def load_device_from_cmdline(self, serial, platform, pre_script, is_pty):
        """Register a single device described on the command line.

        *serial* is interpreted as a pty command when *is_pty* is True,
        otherwise as a serial device path.
        """
        device = {
            "serial": None,
            "platform": platform,
            "serial_pty": None,
            "counter": 0,
            "available": True,
            "connected": True,
            "pre_script": pre_script
        }

        if is_pty:
            device['serial_pty'] = serial
        else:
            device['serial'] = serial

        self.connected_hardware.append(device)

    def load_hardware_map(self, map_file):
        """Load and schema-validate *map_file*, resetting usage counters."""
        hwm_schema = scl.yaml_load(self.schema_path)
        self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
        for i in self.connected_hardware:
            i['counter'] = 0

    def scan_hw(self, persistent=False):
        """Detect connected probes via pyserial and fill self.detected."""
        from serial.tools import list_ports

        if persistent and platform.system() == 'Linux':

            # On Linux, /dev/serial/by-id provides symlinks to
            # '/dev/ttyACMx' nodes using names which are unique as
            # long as manufacturers fill out USB metadata nicely.
            #
            # This creates a map from '/dev/ttyACMx' device nodes
            # to '/dev/serial/by-id/usb-...' symlinks. The symlinks
            # go into the hardware map because they stay the same
            # even when the user unplugs / replugs the device.
            #
            # Some inexpensive USB/serial adapters don't result
            # in unique names here, though, so use of this feature
            # requires explicitly setting persistent=True.

            by_id = Path('/dev/serial/by-id')

            def readlink(link):
                return str((by_id / link).resolve())

            persistent_map = {readlink(link): str(link)
                              for link in by_id.iterdir()}
        else:
            persistent_map = {}

        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:

                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = {}
                s_dev['platform'] = "unknown"
                s_dev['id'] = d.serial_number
                s_dev['serial'] = persistent_map.get(d.device, d.device)
                s_dev['product'] = d.product
                s_dev['runner'] = 'unknown'
                for runner, _ in self.runner_mapping.items():
                    products = self.runner_mapping.get(runner)
                    if d.product in products:
                        s_dev['runner'] = runner
                        # NOTE(review): 'continue' moves to the next runner,
                        # so a later runner's regex could still overwrite
                        # this exact match — confirm whether 'break' was
                        # intended.
                        continue
                    # Try regex matching
                    for p in products:
                        if re.match(p, d.product):
                            s_dev['runner'] = runner

                s_dev['available'] = True
                s_dev['connected'] = True
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def write_map(self, hwm_file):
        """Persist detected devices to *hwm_file* (YAML).

        If the file exists, known devices are re-matched by (id, product):
        matched entries are marked connected with their current serial,
        unmatched existing entries are kept but disconnected, and newly
        detected devices are appended.
        """
        # use existing map
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=SafeLoader)
                hwm.sort(key=lambda x: x['serial'] or '')

                # disconnect everything
                for h in hwm:
                    h['connected'] = False
                    h['serial'] = None

                self.detected.sort(key=lambda x: x['serial'] or '')
                for d in self.detected:
                    for h in hwm:
                        # Each detected device may claim at most one map
                        # entry ('match'), and each entry at most one
                        # device ('connected').
                        if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
                            h['connected'] = True
                            h['serial'] = d['serial']
                            d['match'] = True

                new = list(filter(lambda n: not n.get('match', False), self.detected))
                hwm = hwm + new

                logger.info("Registered devices:")
                self.dump(hwm)

            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)

        else:
            # create new file
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(self.detected)

    @staticmethod
    def dump(hwmap=[], filtered=[], header=[], connected_only=False):
        """Print a table of the given hardware map entries.

        NOTE(review): mutable default arguments — currently harmless
        since none of them is mutated (header is rebound, table is a
        fresh local), but fragile if that ever changes.
        """
        print("")
        table = []
        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in sorted(hwmap, key=lambda i: i['platform']):
            platform = p.get('platform')
            connected = p.get('connected', False)
            if filtered and platform not in filtered:
                continue

            if not connected_only or connected:
                table.append([platform, p.get('id', None), p.get('serial')])

        print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
    """Log a section-by-section size table for a built image.

    :param sc: size-calculator object exposing ``filename``,
        ``sections`` (list of dicts with ``name``/``virt_addr``/
        ``load_addr``/``size``/``type`` keys) and the ``rom_size``/
        ``ram_size`` totals.
    """
    logger.info(sc.filename)
    logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
    # Iterate the sections directly instead of indexing via range(len(...)).
    for v in sc.sections:
        # The size is printed twice: once decimal, once hex.
        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"],
                     v["size"], v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")
def export_tests(filename, tests):
    """Export test identifiers to a CSV file (no header row).

    Each identifier of the form ``section.subsection[....]`` becomes one
    row with columns section/subsection/title/reference; identifiers
    without a '.' are logged and skipped.
    """
    with open(filename, "wt") as csvfile:
        writer = csv.DictWriter(csvfile,
                                ['section', 'subsection', 'title', 'reference'],
                                lineterminator=os.linesep)
        for test in tests:
            parts = test.split(".")
            if len(parts) <= 1:
                logger.info("{} can't be exported".format(test))
                continue
            writer.writerow({
                "section": parts[0].capitalize(),
                "subsection": " ".join(parts[1].split("_")).title(),
                "title": test,
                "reference": test
            })
|
AppInit.py | # coding=utf-8
import multiprocessing
from src.utils.GetPhoneInfo import *
from .AppInit_Android import *
from .AppInit_iOS import *
class AppInit(object):
    """Orchestrates per-device app initialization for Android and iOS.

    Relies on star-imported helpers (ShellCommand, GetPhoneInfo,
    AppInitAndroid, AppInitIos) from the project's init modules.
    """

    def __init__(self):
        pass

    def app_init(self):
        """Detect connected phones and initialize the app on each.

        :return: the device-info dict keyed by device id, where each
            value contains at least a "platformName" of "Android"/"iOS".
        :raises KeyError: for any device with an unrecognized platform.
        """
        sc = ShellCommand()
        # Patch Appium's JS in a background process so it overlaps with
        # device detection (presumably idempotent — confirm in ShellCommand).
        replace_appium_js = multiprocessing.Process(target=sc.replace_appium_js)
        replace_appium_js.start()
        device_info = GetPhoneInfo().get_phone_info()
        if not device_info:
            # Hard-exit: no devices detected (message is user-facing Chinese
            # text: "no device detected, check the phone connection").
            print(u"ERROR! 未检测到设备,请检查手机链接。")
            os._exit(-1)
        # Push the Appium app to every device in parallel.
        process = [multiprocessing.Process(target=sc.push_appium_app, args=(k,)) for k in device_info.keys()]
        for i in process:
            i.start()
        # Per-platform initialization runs sequentially in this process.
        for k, v in device_info.items():
            if v["platformName"] == "Android":
                AppInitAndroid(device_info, k).app_init_android()
            elif v["platformName"] == "iOS":
                AppInitIos(device_info, k).app_init_ios()
            else:
                raise KeyError("The phone os is wrong")
        # Wait for the background work before returning.
        replace_appium_js.join()
        for i in process:
            i.join()
        return device_info
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.