netvm.py
# coding: UTF-8
# Copyright 2020 Hideto Manjo.
#
# Licensed under the MIT License
"""Network virtual memory module."""
import threading
import socket
from margaret.core.memory import VirtualMemory
from margaret.core.formats import NumpyRawFormat
class NetVM(VirtualMemory):
"""NetVM.
Network virtual memory is virtual memory with a communication
function. Memory data can be sent and received via UDP/IP.
"""
def __init__(self, slot, host="", port=5000):
"""Init."""
super(NetVM, self).__init__(slot)
self.host = host
self.port = port
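        # One no-op callback per slot; each is invoked as
        # callback(array, addr, slot) after a successful receive.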
self._callbacks = [lambda array, addr, slot: True] * slot
    def resv(self, slot):
        """Receive.

        Open a UDP socket and receive data for the slot. Each packet is
        read up to the expected size plus one byte; a packet is discarded
        unless its length exactly matches the slot's memory size in bytes.
        On a match, the memory is rewritten and the callback registered
        for the slot is executed.
        """
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as soc:
soc.bind((self.host, self.port + slot))
while True:
data, addr = soc.recvfrom(self.read(slot).nbytes + 1)
if len(data) != self.read(slot).nbytes:
continue
shape, dtype = self.shape(slot)
array = NumpyRawFormat.decode(data, shape, dtype)
self.write(slot, array)
self._callbacks[slot](array, addr, slot)
def send(self, slot, host, port, src_port=3000):
"""Send
Sends memory data for the specified slot via a UDP socket.
"""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as soc:
soc.bind((self.host, src_port))
mem = self.read(slot)
soc.sendto(mem, (host, port))
    def listen(self):
        """Listen.

        Start a receiver daemon thread for each slot. Each slot listens
        on the base port number plus its slot number. The started threads
        are returned.
        """
        threads = []
        for i in range(self._slot):
            th = threading.Thread(target=self.resv, args=(i, ), daemon=True)
            threads.append(th)
            th.start()
        return threads
def on(self, slot, callback):
"""Set a callback event for the slot."""
if callable(callback):
self._callbacks[slot] = callback
def off(self, slot):
"""Unsets the slot callback event."""
self._callbacks[slot] = lambda array, addr, slot: True
def info(self):
"""Return info string of the memory."""
message = []
for i, item in enumerate(self.shape()):
message.append(f"slot: {i}, shape: {item[0]}, "
f"dtype: {item[1]}, port: {self.port + i}")
return "\n".join(message)
if __name__ == "__main__":
import time
import numpy as np
N1 = NetVM(3)
N1.set(0, (3, 3), "float32")
N1.set(1, (3, 3), "float32")
N1.set(2, (3, 4), "float32")
N1.write(1, np.ones((3, 3), dtype=np.float32))
N1.write(2, np.ones((3, 4), dtype=np.float32))
def on_resv(array, addr, slot):
print(f"resv slot{slot} {array.nbytes} bytes from {addr[0]}")
N1.on(0, on_resv)
N1.listen()
print("Listening...")
while True:
time.sleep(1)
print("send slot 1 to 127.0.0.1:5000")
N1.send(1, "127.0.0.1", 5000)
print("send slot 2 to 127.0.0.1:5000")
N1.send(2, "127.0.0.1", 5000)
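# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): because resv() accepts a
# packet only when its length equals the slot's nbytes, and send() transmits
# the slot's buffer as-is, the wire format is simply the raw array bytes with
# no header. Assuming a NetVM listening as above (slot 0 is 3x3 float32 on
# port 5000 + 0), an external peer could feed slot 0 like this; the host and
# port values are illustrative.
#
#   import socket
#   import numpy as np
#
#   payload = np.arange(9, dtype=np.float32).reshape(3, 3).tobytes()
#   assert len(payload) == 36  # must match the slot's nbytes exactly
#   with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as soc:
#       soc.sendto(payload, ("127.0.0.1", 5000))
# ---------------------------------------------------------------------------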
tests.py
"""
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-view-class',
views.view_class_instance, (), {'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, (), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, (), {'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, (),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('42', '37'), {}
),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'inc-ns1', 'inc-ns1:inc-normal-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3',
'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3',
'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, (), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
(
'/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
views.empty_view, (), {'outer': '70'}
),
(
'/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
views.empty_view, (), {'outer': '78', 'extra': 'foobar'}
),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], {'year': 2007, 'month': 5, 'day': 21}),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
{'drive_name': 'C', 'path': r'Documents and Settings\spam'}
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('test', '/test/1', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('nonexistent-view')
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places')
def test_illegal_args_message(self):
msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', args=(1, 2))
def test_illegal_kwargs_message(self):
msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
self.assertEqual(resolver.reverse('named-url1'), '')
self.assertEqual(resolver.reverse('named-url2', 'arg'), 'extra/arg/')
self.assertEqual(resolver.reverse('named-url2', extra='arg'), 'extra/arg/')
def test_resolver_reverse_conflict(self):
"""
url() name arguments don't need to be unique. The last registered
pattern takes precedence for conflicting names.
"""
resolver = get_resolver('urlpatterns_reverse.named_urls_conflict')
# Without arguments, the last URL in urlpatterns has precedence.
self.assertEqual(resolver.reverse('name-conflict'), 'conflict/')
# With an arg, the last URL in urlpatterns has precedence.
self.assertEqual(resolver.reverse('name-conflict', 'arg'), 'conflict-last/arg/')
# With a kwarg, other url()s can be reversed.
self.assertEqual(resolver.reverse('name-conflict', first='arg'), 'conflict-first/arg/')
self.assertEqual(resolver.reverse('name-conflict', middle='arg'), 'conflict-middle/arg/')
self.assertEqual(resolver.reverse('name-conflict', last='arg'), 'conflict-last/arg/')
# The number and order of the arguments don't interfere with reversing.
self.assertEqual(resolver.reverse('name-conflict', 'arg', 'arg'), 'conflict/arg/arg/')
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
with self.assertRaises(Resolver404):
resolve('')
with self.assertRaises(Resolver404):
resolve('a')
with self.assertRaises(Resolver404):
resolve('\\')
with self.assertRaises(Resolver404):
resolve('.')
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a nonexistent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/nonexistent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
with self.assertRaisesMessage(Resolver404, 'tried') as cm:
resolve('/included/nonexistent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
                self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
RegexURLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = RegexURLResolver(r'^/', 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings('settings.py', extra="""
from django.urls import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
# (previously, the below would resolve in a UnicodeEncodeError from __import__ )
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view')
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view', args=[37, 42])
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing')
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing', args=[37, 42])
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Nonexistent namespaces raise errors"
with self.assertRaises(NoReverseMatch):
reverse('blahblah:urlobject-view')
with self.assertRaises(NoReverseMatch):
reverse('test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('included_namespace_urls:inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('included_namespace_urls:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/included/normal/42/37/',
reverse('included_namespace_urls:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/included/+%5C$*/', reverse('included_namespace_urls:inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_app_object(self):
"Dynamic URL objects can return a (pattern, app_name) 2-tuple, and include() can set the namespace"
self.assertEqual('/newapp1/inner/', reverse('new-ns1:urlobject-view'))
self.assertEqual('/newapp1/inner/37/42/', reverse('new-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/newapp1/inner/42/37/', reverse('new-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/newapp1/inner/+%5C$*/', reverse('new-ns1:urlobject-special-view'))
def test_app_object_default_namespace(self):
"Namespace defaults to app_name when including a (pattern, app_name) 2-tuple"
self.assertEqual('/new-default/inner/', reverse('newapp:urlobject-view'))
self.assertEqual('/new-default/inner/37/42/', reverse('newapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/new-default/inner/42/37/', reverse('newapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/new-default/inner/+%5C$*/', reverse('newapp:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('included_namespace_urls:test-ns3:urlobject-view'))
self.assertEqual(
'/included/test3/inner/37/42/', reverse('included_namespace_urls:test-ns3:urlobject-view', args=[37, 42])
)
self.assertEqual(
'/included/test3/inner/42/37/',
reverse('included_namespace_urls:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual(
'/included/test3/inner/+%5C$*/', reverse('included_namespace_urls:test-ns3:urlobject-special-view')
)
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_app_name_pattern(self):
"Namespaces can be applied to include()'d urlpatterns that set an app_name attribute"
self.assertEqual('/app-included1/normal/', reverse('app-ns1:inc-normal-view'))
self.assertEqual('/app-included1/normal/37/42/', reverse('app-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/app-included1/normal/42/37/', reverse('app-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/app-included1/+%5C$*/', reverse('app-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using an include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual(
'/ns-outer/42/normal/37/4/',
reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4})
)
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view')
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42])
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view')
)
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual(
'/default/inner/37/42/',
reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3')
)
self.assertEqual(
'/default/inner/42/37/',
reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3')
)
self.assertEqual(
'/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3')
)
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual(
'/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1')
)
self.assertEqual(
'/other1/inner/42/37/',
reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1')
)
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:included_namespace_urls:inc-normal-view'))
self.assertEqual(
'/+%5C$*/included/normal/37/42/',
reverse('special:included_namespace_urls:inc-normal-view', args=[37, 42])
)
self.assertEqual(
'/+%5C$*/included/normal/42/37/',
reverse('special:included_namespace_urls:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:included_namespace_urls:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual(
'/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'})
)
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
def test_nested_app_lookup(self):
"A nested current_app should be split in individual namespaces (#24904)"
self.assertEqual('/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view'))
self.assertEqual('/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view'))
self.assertEqual(
'/ns-included1/test3/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='inc-ns1:test-ns3')
)
def test_current_app_no_partial_match(self):
"current_app should either match the whole path or shouldn't be used"
self.assertEqual(
'/ns-included1/test4/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='nonexistent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='nonexistent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37},
current_app='nonexistent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='nonexistent:test-ns3')
)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
Test reversing an URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
Test reversing an URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
Test reversing an URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
Test reversing an URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve_error_handler(400), handler)
self.assertEqual(self.resolver.resolve_error_handler(404), handler)
self.assertEqual(self.resolver.resolve_error_handler(500), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
with self.assertRaisesMessage(ValueError, "I don't think I'm getting good"):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
with self.assertRaises(ImproperlyConfigured):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
url(r'uncallable-object/$', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
# passing a callable should return the callable
self.assertEqual(get_callable(empty_view), empty_view)
def test_exceptions(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
with self.assertRaisesMessage(ViewDoesNotExist, "View does not exist in"):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
with self.assertRaises(AttributeError):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
url_patterns = [
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
"Specifying a namespace in django.conf.urls.include() without "
"providing an app_name is not supported."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, 'namespace')
def test_include_4_tuple(self):
msg = 'Passing a 4-tuple to django.conf.urls.include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace', 'blah'))
def test_include_3_tuple(self):
msg = 'Passing a 3-tuple to django.conf.urls.include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'))
def test_include_3_tuple_namespace(self):
msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'), 'namespace')
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
match = resolve(test_url)
self.assertEqual(match.kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
url = reverse('lookahead-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead+/a-city/')
url = reverse('lookahead-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead-/a-city/')
url = reverse('lookbehind-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind+/a-city/')
url = reverse('lookbehind-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind-/a-city/')
def test_invalid_reverse(self):
with self.assertRaises(NoReverseMatch):
reverse('lookahead-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookahead-negative', kwargs={'city': 'not-a-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
BayesianDesigner.py
from functools import partial
from threading import Thread,Condition
import copy
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.stats import norm as scipynorm
from matplotlib import pyplot as plt
from .PawsPlugin import PawsPlugin
class BayesianDesigner(PawsPlugin):
"""Design tool employing Bayesian Optimization on a Gaussian Process prior."""
def __init__(self,
strategy='MPI',strategic_params={'exploration_incentive':0.},
noise_sd=0.,x_domain={},targets={},constraints={},
range_constraints={},categorical_constraints={},
covariance_kernel='sq_exp',covariance_kernel_params={'width':1.},
MC_max_iter=1000,MC_alpha=1.,
verbose=False,log_file=None):
"""Create a BayesianDesigner.
Parameters
----------
strategy : str
optimization strategy (currently only 'MPI' is supported).
'MPI' seeks to Maximize the Probability of Improvement for all targets,
while (jointly) maximizing the likelihood of satisfying all constraints.
strategic_params : dict
parameters that affect the optimization strategy
(currently only 'exploration_incentive' is supported)
noise_sd : float
standard deviation of observation noise- if greater than zero,
the diagonal covariance elements are augmented by `noise_sd`**2.
x_domain : dict
dict of input column names and corresponding [min,max] lists
targets : dict
dict of output names (keys) and target specifiers
(either 'minimize' or 'maximize')
constraints : dict
dict of output names (keys) and real-valued targets (values)
range_constraints: dict
dict of output names (keys) and [min,max] constraint ranges (values)-
specify open intervals by setting min or max to None.
categorical_constraints : dict
dict of output names (keys) and categorical targets (values)
covariance_kernel : str
choice of covariance kernel- currently either 'inv_exp' or 'sq_exp'
covariance_kernel_params : dict
dict of covariance kernel parameters (currently only 'width' is supported)
MC_max_iter : int
number of Monte Carlo iterations for optimizing acquisition function
MC_alpha : int
scaling factor for Monte Carlo random steps
verbose : bool
log_file : str
"""
super(BayesianDesigner,self).__init__(verbose=verbose,log_file=log_file)
self.strategy = strategy
self.strat_params = strategic_params
self.noise_sd = noise_sd
self.x_domain = x_domain
self.targets = targets
self.constraints = constraints
self.range_constraints = range_constraints
self.categorical_constraints = categorical_constraints
self.covariance_kernel = covariance_kernel
self.cov_params = covariance_kernel_params
self.MC_max_iter = MC_max_iter
self.MC_alpha = MC_alpha
self.modeling_lock = Condition()
self.candidate_lock = Condition()
self._candidates = []
self.dataset = None
# TODO: check targets and constraints for redundant keys,
# raise an exception if any are found
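    # Covariance kernels (selected via `covariance_kernel`):
    #   _sq_exp_kernel:  k(x1, x2) = exp(-||x2 - x1||^2 / (2 * width^2))
    #   _inv_exp_kernel: k(x1, x2) = exp(-||x2 - x1|| / width)
    # Both are stationary: they depend only on the distance between the
    # two (min-max scaled) input vectors.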
@staticmethod
def _sq_exp_kernel(x1,x2,width):
return np.exp(-np.sum((x2-x1)**2)/(2*width**2))
@staticmethod
def _inv_exp_kernel(x1,x2,width):
return np.exp(-np.linalg.norm(x2-x1)/(width))
def cov_kernel(self,x1,x2):
if self.covariance_kernel == 'sq_exp':
return self._sq_exp_kernel(x1,x2,self.cov_params['width'])
elif self.covariance_kernel == 'inv_exp':
return self._inv_exp_kernel(x1,x2,self.cov_params['width'])
else:
raise ValueError('invalid kernel specification: {}'.format(self.covariance_kernel))
def add_samples(self,*args):
with self.modeling_lock:
for sampl in args:
self.dataset = self.dataset.append(sampl,ignore_index=True)
self.set_data()
    def set_data(self,df=None):
        if self.verbose: self.message_callback('LOCKING AND SETTING UP MODEL')
        with self.modeling_lock:
            # when no new DataFrame is given (e.g. when called from
            # add_samples()), rebuild the model from the stored dataset
            if df is None:
                df = self.dataset
            else:
                self.dataset = df
# standardize x values
self.x_scaler = MinMaxScaler()
x_keys = list(self.x_domain.keys())
x_domain_df = pd.DataFrame(self.x_domain,columns=x_keys)
self.x_scaler.fit(x_domain_df)
self.xs_domain_df = pd.DataFrame(self.x_scaler.transform(x_domain_df),columns=x_keys)
self.xs_df = pd.DataFrame(self.x_scaler.transform(df[x_keys]),columns=x_keys)
# build the covariance matrix, save its inverse
nx = self.xs_df.shape[0]
self.cov_mat = np.array([[
self.cov_kernel(self.xs_df.loc[ix1,:],self.xs_df.loc[ix2,:])
for ix2 in range(nx)] for ix1 in range(nx)])
if self.noise_sd > 0.:
self.cov_mat += self.noise_sd**2*np.eye(self.cov_mat.shape[0])
self.inv_cov_mat = np.linalg.inv(self.cov_mat)
self._set_target_data()
if self.verbose: self.message_callback('MODEL SETUP COMPLETE!')
def _set_target_data(self):
# dicts for holding scalers, scaled values, gp model surrogates, incumbents,
# index filters, and index-filtered inverse covariance matrices
self.y_scalers = {}
self.y_arrays = {}
self.ys_arrays = {}
self.gp_arrays = {}
self.gp_range_constraints = {}
self.gp_categorical_constraints = {}
self.gp_incumbents = {}
self.filter_flags = {}
self.good_idxs = {}
self.filtered_cov_mats = {}
self.filtered_inv_cov_mats = {}
# model zero-centered integers (-1 and 1) for all categorical constraints
for y_key, y_cat in self.categorical_constraints.items():
y_array = np.array(self.dataset[y_key],dtype=int)
good_idx = np.invert(np.isnan(y_array))
self.good_idxs[y_key] = good_idx
y_array = y_array[good_idx].reshape(-1,1)
self.y_arrays[y_key] = y_array
self.ys_arrays[y_key] = y_array
self.gp_arrays[y_key] = copy.deepcopy(y_array)
# let the decision boundary lie at zero: set False labels to -1
self.gp_arrays[y_key][self.ys_arrays[y_key]==0]=-1
self.gp_categorical_constraints[y_key] = 1
if not bool(y_cat):
self.gp_categorical_constraints[y_key] = -1
self.filter_flags[y_key] = not self.good_idxs[y_key].all()
if self.filter_flags[y_key]:
# TODO: check if any self.good_idxs match,
# and if so, use the corresponding self.filtered_inv_cov_mats,
# instead of computing yet another inverse
self.filtered_cov_mats[y_key] = self.cov_mat[good_idx,:][:,good_idx]
self.filtered_inv_cov_mats[y_key] = np.linalg.inv(self.filtered_cov_mats[y_key])
# model standardized values for all range constraints
for y_key, y_range in self.range_constraints.items():
y_array = np.array(self.dataset[y_key])
good_idx = np.invert(np.isnan(y_array))
self.good_idxs[y_key] = good_idx
y_array = y_array[good_idx].reshape(-1,1)
self.y_arrays[y_key] = y_array
self.y_scalers[y_key] = StandardScaler()
self.y_scalers[y_key].fit(y_array)
self.ys_arrays[y_key] = self.y_scalers[y_key].transform(y_array)
self.gp_arrays[y_key] = self.ys_arrays[y_key]
self.gp_range_constraints[y_key] = [None,None]
if y_range[0] is not None:
self.gp_range_constraints[y_key][0] = \
self.y_scalers[y_key].transform(np.array(y_range[0]).reshape(-1,1))[0,0]
if y_range[1] is not None:
self.gp_range_constraints[y_key][1] = \
self.y_scalers[y_key].transform(np.array(y_range[1]).reshape(-1,1))[0,0]
self.filter_flags[y_key] = not self.good_idxs[y_key].all()
if self.filter_flags[y_key]:
self.filtered_cov_mats[y_key] = self.cov_mat[good_idx,:][:,good_idx]
self.filtered_inv_cov_mats[y_key] = np.linalg.inv(self.filtered_cov_mats[y_key])
# model exact value constraints
# by the likelihood to optimize the error
# relative to the incumbent best sample
# TODO: how to use self.strategy here?
for y_key, y_val in self.constraints.items():
y_array = np.array(self.dataset[y_key])
good_idx = np.invert(np.isnan(y_array))
self.good_idxs[y_key] = good_idx
y_array = y_array[good_idx].reshape(-1,1)
self.y_arrays[y_key] = y_array
self.y_scalers[y_key] = StandardScaler()
self.y_scalers[y_key].fit(y_array)
y_scaled = np.array(self.y_scalers[y_key].transform(y_array))
self.ys_arrays[y_key] = y_scaled
self.gp_arrays[y_key] = y_scaled
y_diff_sqr = (y_array-y_val)**2
self.gp_incumbents[y_key] = self.gp_arrays[y_key][np.argmin(y_diff_sqr)][0]
self.filter_flags[y_key] = not self.good_idxs[y_key].all()
if self.filter_flags[y_key]:
self.filtered_cov_mats[y_key] = self.cov_mat[good_idx,:][:,good_idx]
self.filtered_inv_cov_mats[y_key] = np.linalg.inv(self.filtered_cov_mats[y_key])
# model targets by the likelihood to optimize (min or max)
# relative to the incumbent best sample,
# in the context of self.strategy
for y_key, targ_spec in self.targets.items():
y_array = np.array(self.dataset[y_key])
good_idx = np.invert(np.isnan(y_array))
self.good_idxs[y_key] = good_idx
y_array = y_array[good_idx].reshape(-1,1)
self.y_scalers[y_key] = StandardScaler()
self.y_scalers[y_key].fit(y_array)
self.ys_arrays[y_key] = self.y_scalers[y_key].transform(y_array)
self.gp_arrays[y_key] = self.ys_arrays[y_key]
#if self.x_cov_noise > 0.:
# gp_preds = []
# for ix in self.xs_df.index:
# xs = np.array(self.xs_df.loc[ix,:])
# cov_vec = self.cov_vector(xs)
# gp_preds.append(self._gp_mean(cov_vec,self.gp_arrays[y_key]))
# gp_preds = np.array(gp_preds)
#else:
if targ_spec == 'minimize':
self.gp_incumbents[y_key] = np.min(self.gp_arrays[y_key])
elif targ_spec == 'maximize':
self.gp_incumbents[y_key] = np.max(self.gp_arrays[y_key])
else:
raise ValueError('unsupported target for {}: {}'
                    .format(y_key,targ_spec))
self.filter_flags[y_key] = not self.good_idxs[y_key].all()
if self.filter_flags[y_key]:
self.filtered_cov_mats[y_key] = self.cov_mat[good_idx,:][:,good_idx]
self.filtered_inv_cov_mats[y_key] = np.linalg.inv(self.filtered_cov_mats[y_key])
def set_targets(self,**kwargs):
with self.modeling_lock:
for y_key,targ_spec in kwargs.items():
if not y_key in self.targets:
raise KeyError('target key {} does not exist'.format(y_key))
self.targets[y_key] = targ_spec
self._set_target_data()
if self.verbose: self.message_callback('targets set: {}'.format(kwargs))
def set_constraints(self,**kwargs):
with self.modeling_lock:
for y_key,y_val in kwargs.items():
if not y_key in self.constraints:
raise KeyError('constraint key {} does not exist'.format(y_key))
self.constraints[y_key] = y_val
self._set_target_data()
if self.verbose: self.message_callback('constraints set: {}'.format(kwargs))
def set_categorical_constraints(self,**kwargs):
with self.modeling_lock:
            for y_key,y_cat in kwargs.items():
if not y_key in self.categorical_constraints:
raise KeyError('categorical constraint key {} does not exist'.format(y_key))
self.categorical_constraints[y_key] = y_cat
self._set_target_data()
if self.verbose: self.message_callback('categorical constraints set: {}'.format(kwargs))
def set_range_constraints(self,**kwargs):
with self.modeling_lock:
            for y_key,y_range in kwargs.items():
if not y_key in self.range_constraints:
raise KeyError('range constraint key {} does not exist'.format(y_key))
self.range_constraints[y_key] = y_range
self._set_target_data()
if self.verbose: self.message_callback('range constraints set: {}'.format(kwargs))
def cov_vector(self,xs,idx_filter=None):
if idx_filter is None:
xs_idx = np.array(self.xs_df.index)
else:
xs_idx = np.array(self.xs_df.index)[idx_filter]
covv = np.array([self.cov_kernel(xs,self.xs_df.loc[ix,:]) for ix in xs_idx])
return covv
def _gp_var(self,cov_vector,inv_cov_mat):
self_cov = 1.
if self.noise_sd > 0.:
self_cov = 1.+self.noise_sd**2
return self_cov-np.dot(np.dot(cov_vector,inv_cov_mat),cov_vector)
def _gp_mean(self,cov_vector,inv_cov_mat,y_vector):
return np.dot(np.dot(cov_vector,inv_cov_mat),y_vector)
def _compute_cov(self,xs,y_key=None):
if y_key:
covvec = self.cov_vector(xs,self.good_idxs[y_key])
gp_var = self._gp_var(covvec,self.filtered_inv_cov_mats[y_key])
else:
covvec = self.cov_vector(xs)
gp_var = self._gp_var(covvec,self.inv_cov_mat)
if gp_var <= 0.:
gp_sd = 0.
else:
gp_sd = np.sqrt(gp_var)
return covvec,gp_var,gp_sd
def predict_outputs(self,xs):
preds = {}
gp_preds = {}
gp_scores = {}
with self.modeling_lock:
# get covariance and gp predictions without filters
covvec_all,gp_var_all,gp_sd_all = self._compute_cov(xs)
# evaluate categoricals
for y_key,gp_cat in self.gp_categorical_constraints.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
gp_preds[y_key] = [gp_mean_y,gp_sd_y]
# decision boundary is zero, and the likelihood is the cdf above/below zero
pred = bool(gp_mean_y>0)
if pred:
proba = float(1.-scipynorm.cdf(0.,gp_mean_y,gp_sd_y))
else:
proba = float(scipynorm.cdf(0.,gp_mean_y,gp_sd_y))
preds[y_key] = [pred,proba]
gp_scores[y_key] = self.categorical_probability(gp_cat,gp_mean_y,gp_sd_y)
# evaluate targets
for y_key, targ_spec in self.targets.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
gp_preds[y_key] = [gp_mean_y,gp_sd_y]
ys_mean = self._gp_mean(covvec_y,inv_cov_y,self.ys_arrays[y_key])[0]
mean = self.y_scalers[y_key].inverse_transform(np.array(ys_mean).reshape(-1,1))[0,0]
sd = gp_sd_y*self.y_scalers[y_key].scale_[0]
preds[y_key] = [mean,sd]
gp_incumb = self.gp_incumbents[y_key]
expl_inc = self.strat_params['exploration_incentive']
gp_scores[y_key] = self.improvement_probability(
targ_spec,gp_incumb,gp_mean_y,gp_sd_y,expl_inc)
# evaluate constraints
for y_key, y_con in self.constraints.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
gp_preds[y_key] = [gp_mean_y,gp_sd_y]
ys_mean = self._gp_mean(covvec_y,inv_cov_y,self.ys_arrays[y_key])[0]
mean = self.y_scalers[y_key].inverse_transform(np.array(ys_mean).reshape(-1,1))[0,0]
sd = gp_sd_y*self.y_scalers[y_key].scale_[0]
preds[y_key] = [mean,sd]
#expl_inc = self.strat_params['exploration_incentive']
# TODO: think about how to incorporate exploration incentive here
ys_con = self.y_scalers[y_key].transform(np.array(y_con).reshape(-1,1))[0,0]
gp_incumb = self.gp_incumbents[y_key]
incumb_abserr = np.abs(gp_incumb-ys_con)
gp_rc = [ys_con-incumb_abserr,ys_con+incumb_abserr]
range_val = self.range_probability(gp_rc,gp_mean_y,gp_sd_y)
max_range_val = 2*incumb_abserr/np.sqrt(2.*np.pi*gp_sd_y**2)
acq_val = range_val/max_range_val
gp_scores[y_key] = acq_val
# evaluate range constraints
for y_key,gp_rc in self.gp_range_constraints.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
gp_preds[y_key] = [gp_mean_y,gp_sd_y]
ys_mean = self._gp_mean(covvec_y,inv_cov_y,self.ys_arrays[y_key])[0]
mean = self.y_scalers[y_key].inverse_transform(np.array(ys_mean).reshape(-1,1))[0,0]
sd = gp_sd_y*self.y_scalers[y_key].scale_[0]
preds[y_key] = [mean,sd]
gp_scores[y_key] = self.range_probability(gp_rc,gp_mean_y,gp_sd_y)
return preds,gp_preds,gp_scores
def plot_outputs(self,gp_preds,gp_scores):
# cats
for y_key,gp_cat in self.gp_categorical_constraints.items():
print('{} score: {}'.format(y_key,gp_scores[y_key]))
self.plot_distrib(y_key,gp_preds[y_key][0],gp_preds[y_key][1],boundary=0.,target=gp_cat)
# ranges
for y_key,gp_rc in self.gp_range_constraints.items():
print('{} score: {}'.format(y_key,gp_scores[y_key]))
self.plot_distrib(y_key,gp_preds[y_key][0],gp_preds[y_key][1],lower=gp_rc[0],upper=gp_rc[1])
# vals
for y_key, y_con in self.constraints.items():
print('{} score: {}'.format(y_key,gp_scores[y_key]))
ys_con = self.y_scalers[y_key].transform(np.array(y_con).reshape(-1,1))[0,0]
incumb = self.gp_incumbents[y_key]
incumb_abserr = np.abs(incumb-ys_con)
tgt_range = [ys_con-incumb_abserr,ys_con+incumb_abserr]
self.plot_distrib(y_key,gp_preds[y_key][0],gp_preds[y_key][1],
lower=tgt_range[0],target=ys_con,upper=tgt_range[1])
# targs
for y_key, targ_spec in self.targets.items():
print('{} score: {}'.format(y_key,gp_scores[y_key]))
incumb = self.gp_incumbents[y_key]
            self.plot_distrib(y_key,gp_preds[y_key][0],gp_preds[y_key][1],incumbent=incumb)
def _joint_acq_func(self,xs):
acq_vals = []
# get covariance and gp predictions without filters
covvec_all,gp_var_all,gp_sd_all = self._compute_cov(xs)
# compute acq. value for each categorical constraint:
# this should be the likelihood of the prediction
# to project onto the specified category
for y_key,gp_cat in self.gp_categorical_constraints.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
acq_val = self.categorical_probability(gp_cat,gp_mean_y,gp_sd_y)
acq_vals.append(acq_val)
# compute acq. value for each target:
# this should be the likelihood of optimizing the target,
# in the context of self.strategy
for y_key,target_spec in self.targets.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
expl_inc = self.strat_params['exploration_incentive']
gp_incumb = self.gp_incumbents[y_key]
if self.strategy == 'MPI':
acq_val = self.improvement_probability(
target_spec,gp_incumb,gp_mean_y,gp_sd_y,expl_inc)
acq_vals.append(acq_val)
else:
raise ValueError('optimization strategy {} not supported'.format(self.strategy))
# compute acq. value for each constraint:
# this should be the likelihood of optimizing
# the value relative to the incumbent
for y_key,y_con in self.constraints.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
ys_con = self.y_scalers[y_key].transform(np.array(y_con).reshape(-1,1))[0,0]
gp_incumb = self.gp_incumbents[y_key]
incumb_abserr = np.abs(gp_incumb-ys_con)
gp_rc = [ys_con-incumb_abserr,ys_con+incumb_abserr]
range_val = self.range_probability(gp_rc,gp_mean_y,gp_sd_y)
max_range_val = 2*incumb_abserr/np.sqrt(2.*np.pi*gp_sd_y**2)
acq_val = range_val/max_range_val
acq_vals.append(acq_val)
# compute acq. value for each range constraint:
# this should be the likelihood of the prediction
# to fall within the range of the constraint
for y_key,gp_rc in self.gp_range_constraints.items():
if self.filter_flags[y_key]:
covvec_y,gp_var_y,gp_sd_y = self._compute_cov(xs,y_key)
inv_cov_y = self.filtered_inv_cov_mats[y_key]
else:
covvec_y,gp_var_y,gp_sd_y = covvec_all,gp_var_all,gp_sd_all
inv_cov_y = self.inv_cov_mat
gp_mean_y = self._gp_mean(covvec_y,inv_cov_y,self.gp_arrays[y_key])[0]
acq_val = self.range_probability(gp_rc,gp_mean_y,gp_sd_y)
acq_vals.append(acq_val)
        return np.prod(acq_vals)
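    # Note (a summary of the code above, not an external specification): the
    # joint acquisition value is the product of the per-output scores, so a
    # candidate is attractive only if it is simultaneously likely to satisfy
    # every categorical, value, and range constraint and to improve every
    # target; a single near-zero factor drives the whole product toward zero.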
    def improvement_probability(self,target_spec,incumb,gp_mean,gp_sd,expl_inc):
        if target_spec == 'maximize':
            ztarg = (gp_mean-incumb-expl_inc)/gp_sd
        elif target_spec == 'minimize':
            ztarg = (incumb-gp_mean-expl_inc)/gp_sd
        else:
            raise ValueError('unsupported target spec: {}'.format(target_spec))
        acq_val = scipynorm.cdf(ztarg)
        return acq_val
def range_probability(self,gp_range,gp_mean,gp_sd):
cdf_ub = 1.
if gp_range[1] is not None:
cdf_ub = scipynorm.cdf(gp_range[1],gp_mean,gp_sd)
cdf_lb = 0.
if gp_range[0] is not None:
cdf_lb = scipynorm.cdf(gp_range[0],gp_mean,gp_sd)
acq_val = cdf_ub - cdf_lb
return acq_val
def categorical_probability(self,gp_cat,gp_mean,gp_sd):
if gp_cat==1:
# get the probability of a value greater than zero
acq_val = 1.-scipynorm.cdf(0,gp_mean,gp_sd)
else:
# get the probability of a value less than zero
acq_val = scipynorm.cdf(0,gp_mean,gp_sd)
return acq_val
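    # For reference, the three scoring helpers above assume a Gaussian
    # predictive distribution N(mu, sigma^2) (a summary of the code, not an
    # external reference):
    #   improvement_probability: Phi((mu - f* - xi)/sigma) for 'maximize',
    #     or Phi((f* - mu - xi)/sigma) for 'minimize', i.e. the probability of
    #     beating the incumbent f* by at least the exploration incentive xi.
    #   range_probability: Phi((ub - mu)/sigma) - Phi((lb - mu)/sigma), the
    #     predictive mass inside [lb, ub] (missing bounds default to +/-inf).
    #   categorical_probability: the predictive mass above zero for the
    #     positive class, or below zero for the negative class.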
def optimize_candidate(self):
if self.verbose: self.message_callback('starting candidate optimization')
# take the running_lock and wait on it-
# this ensures that the optimization method
# gets priority to take the modeling_lock
with self.running_lock:
opt_thread = Thread(target=self._optimize_candidate)
opt_thread.start()
self.running_lock.wait()
def get_next_candidate(self):
if self.verbose: self.message_callback('fetching candidate...')
with self.candidate_lock:
while not self._candidates:
if self.verbose: self.message_callback(
'no candidates- waiting...')
                # wait() to release candidate_lock and await notification
self.candidate_lock.wait()
cand_data = self._candidates.pop(0)
#####
xs = cand_data['xs_array']
preds,gp_preds,gp_scores = self.predict_outputs(xs)
cand_data.update(prediction=preds,gp_prediction=gp_preds,scores=gp_scores)
#####
return cand_data
def _optimize_candidate(self):
with self.modeling_lock:
if self.verbose:
self.message_callback('LOCKING MODEL AND SEEKING CANDIDATE')
# the thread that launched this method
# should be waiting for a self.run_notify(),
# to avoid accidentally stealing the modeling_lock
self.run_notify()
xs_opt, acq_val = self._optimize_acq()
# inverse-transform the optimized candidate
x_opt = self.x_scaler.inverse_transform(xs_opt.reshape(1,-1))[0]
# store the optimized candidate in a dict, keyed by column names
cand_opt = dict([(xk,xval) for xk,xval in zip(self.xs_df.columns,x_opt)])
cand_opt_s = dict([(xk,xsval) for xk,xsval in zip(self.xs_df.columns,xs_opt)])
candidate_data = {'targets':copy.deepcopy(self.targets),\
'constraints':copy.deepcopy(self.constraints),\
'range_constraints':copy.deepcopy(self.range_constraints),\
'categorical_constraints':copy.deepcopy(self.categorical_constraints),\
'candidate':cand_opt,'scaled_candidate':cand_opt_s,\
'xs_array':xs_opt,'acquisition_value':acq_val}
with self.candidate_lock:
self._candidates.append(candidate_data)
# send notification in case there is a thread waiting for the result
self.candidate_lock.notify()
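    # Intended call pattern (a sketch inferred from the methods above, not a
    # definitive API contract; `optimizer` is a hypothetical instance of this
    # class):
    #
    #   optimizer.optimize_candidate()          # launches a background search
    #   cand = optimizer.get_next_candidate()   # blocks until a result is queued
    #   print(cand['candidate'], cand['acquisition_value'])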
def _optimize_acq(self):
if self.verbose:
self.message_callback('starting {} MC iterations at alpha={}'
.format(self.MC_max_iter,self.MC_alpha))
# start with a random value
#xs_best = np.array([np.random.rand(1)[0] for xkey in self.x_domain.keys()])
# start on a grid or on the best-yet training set point-
# TODO: add inputs for selecting start point strategy
ts_acq = [self._joint_acq_func(np.array(self.xs_df.loc[ix,:])) for ix in self.xs_df.index]
ts_acq = np.array(ts_acq)
ibest = np.argmax(ts_acq)
xs_best = np.array(self.xs_df.iloc[ibest,:])
obj_best = self._joint_acq_func(xs_best)
xs_current = xs_best
obj_current = obj_best
n_acc = 0
for iit in range(1,self.MC_max_iter+1):
# apply a random change
delta_xs = 2*self.MC_alpha*np.array([np.random.rand(1)[0]-0.5 for xx in xs_current])
xs_new = xs_current + delta_xs
# if it violates the domain, reject
if any([xn<0. or xn>1. for xn in xs_new]):
obj_new = 0.
#print('DOMAIN VIOLATION: REJECT')
else:
# evaluate the objective
obj_new = self._joint_acq_func(xs_new)
# if the objective goes up, keep it
if (obj_new > obj_current):
#print('IMPROVEMENT: {} --> {}'.format(obj_current,obj_new))
xs_current = xs_new
obj_current = obj_new
n_acc += 1
# if this is the best yet, save it
if (obj_new > obj_best):
#print('*** NEW BEST: {} --> {}'.format(obj_best,obj_new))
xs_best = xs_new
obj_best = obj_new
else:
# if the objective goes down, make a stochastic decision
bdry = np.random.rand(1)[0]
#print('PROPOSAL: {} --> {}'.format(obj_current,obj_new))
if (obj_current <= 0.) or (obj_new/obj_current > bdry):
#print('ACCEPTED: {} > {}'.format(obj_new/obj_current,bdry))
#print('ACCEPTED: objective {} -> {}'.format(obj_current,obj_new))
xs_current = xs_new
obj_current = obj_new
n_acc += 1
if (np.mod(iit,1000)==0) or (iit<1000 and np.mod(iit,100)==0):
# check the acceptance ratio
ac_ratio = float(n_acc)/iit
if self.verbose:
self.message_callback('iter {}/{}- acceptance ratio: {}, best value: {}'.format(
iit,self.MC_max_iter,ac_ratio,obj_best))
return xs_best, obj_best
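    # The random-walk search above always accepts uphill moves and accepts a
    # downhill move with probability obj_new/obj_current (a Metropolis-style
    # ratio test on the acquisition value, with out-of-domain proposals scored
    # as zero). The acceptance ratio logged every few hundred iterations is a
    # rough indicator of whether the step size MC_alpha is too large or small.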
def plot_distrib(self,y_key,gp_mean,gp_sd,**kwargs):
print('target: {}'.format(y_key))
        print('prediction: {} (sd: {})'.format(gp_mean,gp_sd))
gp_range = np.linspace(gp_mean-6.*gp_sd,gp_mean+6.*gp_sd,num=100)
gp_pdf = scipynorm.pdf(gp_range,gp_mean,gp_sd)*(self.gp_arrays[y_key].shape[0])
plt.figure()
plt.plot(gp_range,gp_pdf,'r')
kwkeys = list(kwargs.keys())
plotkeys = []
for kwkey in kwkeys:
kwval = kwargs[kwkey]
if kwval is not None:
plotkeys.append(kwkey)
plt.axvline(x=kwval)
plt.hist(self.gp_arrays[y_key],bins=round(self.gp_arrays[y_key].shape[0]))
plt.legend(['gp prediction']+plotkeys+['training samples'])
plt.show()
|
httpclient_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
def get(self):
self.write("asdf")
self.flush()
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1)
self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.startswith('Content-Type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u("MyUserAgent"), b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
@gen_test
def test_body_sanity_checks(self):
hello_url = self.get_url('/hello')
with self.assertRaises(ValueError) as context:
yield self.http_client.fetch(hello_url, body='data')
self.assertTrue('must be None' in str(context.exception))
with self.assertRaises(ValueError) as context:
yield self.http_client.fetch(hello_url, method='POST')
self.assertTrue('must not be None' in str(context.exception))
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
self.server_ioloop.stop()
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
|
pivideostream.py
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import cv2
import time
class PiVideoStream:
def __init__(self, resolution=(320, 240), framerate=32, **kwargs):
# initialize the camera
self.camera = PiCamera()
# set camera parameters
self.camera.resolution = resolution
self.camera.framerate = framerate
# set optional camera parameters (refer to PiCamera docs)
for (arg, value) in kwargs.items():
setattr(self.camera, arg, value)
# initialize the stream
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr", use_video_port=True)
        # initialize the frame, a flag indicating whether it has been updated,
        # and the variable used to indicate if the thread should be stopped
self.frame = None
self.is_updated = False
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
for f in self.stream:
# grab the frame from the stream and clear the stream in
# preparation for the next frame and update the update checker
self.frame = f.array
self.rawCapture.truncate(0)
self.is_updated = True
# if the thread indicator variable is set, stop the thread
            # and release camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
        # block until there is a newly updated frame
while not self.is_updated:
if self.stopped:
return None
time.sleep(0.001)
continue
self.is_updated = False
# return the newly updated frame
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
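# Minimal usage sketch (illustrative only; assumes a Raspberry Pi camera module
# is attached and that an OpenCV display window is available).
if __name__ == "__main__":
    stream = PiVideoStream(resolution=(320, 240), framerate=32).start()
    time.sleep(2.0)  # give the camera sensor time to warm up
    try:
        while True:
            frame = stream.read()
            if frame is None:
                break
            cv2.imshow("PiVideoStream", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()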
|
BlockCompressor.py
|
from os import remove
from nsz.nut import Print
from time import sleep
from pathlib import Path
from traceback import format_exc
from zstandard import ZstdCompressor
from nsz.ThreadSafeCounter import Counter
from nsz.SectionFs import isNcaPacked, sortedFs
from multiprocessing import Process, Manager
from nsz.Fs import Pfs0, Hfs0, Nca, Type, Ticket, Xci, factory
from nsz.PathTools import *
import enlighten
#import sys
def compressBlockTask(in_queue, out_list, readyForWork, pleaseKillYourself, blockSize):
while True:
readyForWork.increment()
item = in_queue.get()
#readyForWork.decrement() # https://github.com/nicoboss/nsz/issues/80
if pleaseKillYourself.value() > 0:
break
		buffer, compressionLevel, compressedblockSizeList, chunkRelativeBlockID = item  # compressedblockSizeList is unused here
if buffer == 0:
return
if compressionLevel == 0 and len(buffer) == blockSize: # https://github.com/nicoboss/nsz/issues/79
out_list[chunkRelativeBlockID] = buffer
else:
compressed = ZstdCompressor(level=compressionLevel).compress(buffer)
out_list[chunkRelativeBlockID] = compressed if len(compressed) < len(buffer) else buffer
def blockCompress(filePath, compressionLevel, blockSizeExponent, outputDir, threads):
if filePath.suffix == '.nsp':
return blockCompressNsp(filePath, compressionLevel, blockSizeExponent, outputDir, threads)
elif filePath.suffix == '.xci':
return blockCompressXci(filePath, compressionLevel, blockSizeExponent, outputDir, threads)
def blockCompressContainer(readContainer, writeContainer, compressionLevel, blockSizeExponent, threads):
CHUNK_SZ = 0x100000
UNCOMPRESSABLE_HEADER_SIZE = 0x4000
if blockSizeExponent < 14 or blockSizeExponent > 32:
raise ValueError("Block size must be between 14 and 32")
blockSize = 2**blockSizeExponent
manager = Manager()
results = manager.list()
readyForWork = Counter(0)
pleaseKillYourself = Counter(0)
TasksPerChunk = 209715200//blockSize
for i in range(TasksPerChunk):
results.append(b"")
pool = []
work = manager.Queue(threads)
for i in range(threads):
p = Process(target=compressBlockTask, args=(work, results, readyForWork, pleaseKillYourself, blockSize))
p.start()
pool.append(p)
for nspf in readContainer:
if isinstance(nspf, Nca.Nca) and nspf.header.contentType == Type.Content.DATA:
Print.info('Skipping delta fragment {0}'.format(nspf._path))
continue
if isinstance(nspf, Nca.Nca) and (nspf.header.contentType == Type.Content.PROGRAM or nspf.header.contentType == Type.Content.PUBLICDATA) and nspf.size > UNCOMPRESSABLE_HEADER_SIZE:
if isNcaPacked(nspf):
offsetFirstSection = sortedFs(nspf)[0].offset
newFileName = nspf._path[0:-1] + 'z'
f = writeContainer.add(newFileName, nspf.size)
startPos = f.tell()
nspf.seek(0)
f.write(nspf.read(UNCOMPRESSABLE_HEADER_SIZE))
sections = []
for fs in sortedFs(nspf):
sections += fs.getEncryptionSections()
if len(sections) == 0:
for p in pool:
						#Process.terminate() might corrupt the data structure but we don't care
p.terminate()
raise Exception("NCA can't be decrypted. Outdated keys.txt?")
header = b'NCZSECTN'
header += len(sections).to_bytes(8, 'little')
i = 0
for fs in sections:
i += 1
header += fs.offset.to_bytes(8, 'little')
header += fs.size.to_bytes(8, 'little')
header += fs.cryptoType.to_bytes(8, 'little')
header += b'\x00' * 8
header += fs.cryptoKey
header += fs.cryptoCounter
f.write(header)
blockID = 0
chunkRelativeBlockID = 0
startChunkBlockID = 0
blocksHeaderFilePos = f.tell()
bytesToCompress = nspf.size - UNCOMPRESSABLE_HEADER_SIZE
blocksToCompress = bytesToCompress//blockSize + (bytesToCompress%blockSize > 0)
compressedblockSizeList = [0]*blocksToCompress
header = b'NCZBLOCK' #Magic
header += b'\x02' #Version
header += b'\x01' #Type
header += b'\x00' #Unused
header += blockSizeExponent.to_bytes(1, 'little') #blockSizeExponent in bits: 2^x
header += blocksToCompress.to_bytes(4, 'little') #Amount of Blocks
header += bytesToCompress.to_bytes(8, 'little') #Decompressed Size
header += b'\x00' * (blocksToCompress*4)
f.write(header)
decompressedBytes = UNCOMPRESSABLE_HEADER_SIZE
compressedBytes = f.tell()
BAR_FMT = u'{desc}{desc_pad}{percentage:3.0f}%|{bar}| {count:{len_total}d}/{total:d} {unit} [{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]'
bar = enlighten.Counter(total=nspf.size//1048576, desc='Compressing', unit='MiB', color='cyan', bar_format=BAR_FMT)
subBars = bar.add_subcounter('green', all_fields=True)
partitions = []
if offsetFirstSection-UNCOMPRESSABLE_HEADER_SIZE > 0:
partitions.append(nspf.partition(offset = UNCOMPRESSABLE_HEADER_SIZE, size = offsetFirstSection-UNCOMPRESSABLE_HEADER_SIZE, cryptoType = Type.Crypto.CTR.NONE, autoOpen = True))
for section in sections:
#Print.info('offset: %x\t\tsize: %x\t\ttype: %d\t\tiv%s' % (section.offset, section.size, section.cryptoType, str(hx(section.cryptoCounter))), pleaseNoPrint)
partitions.append(nspf.partition(offset = section.offset, size = section.size, cryptoType = section.cryptoType, cryptoKey = section.cryptoKey, cryptoCounter = bytearray(section.cryptoCounter), autoOpen = True))
if UNCOMPRESSABLE_HEADER_SIZE-offsetFirstSection > 0:
partitions[0].seek(UNCOMPRESSABLE_HEADER_SIZE-offsetFirstSection)
partNr = 0
bar.count = nspf.tell()//1048576
subBars.count = f.tell()//1048576
bar.refresh()
while True:
buffer = partitions[partNr].read(blockSize)
while (len(buffer) < blockSize and partNr < len(partitions)-1):
partitions[partNr].close()
partitions[partNr] = None
partNr += 1
buffer += partitions[partNr].read(blockSize - len(buffer))
if chunkRelativeBlockID >= TasksPerChunk or len(buffer) == 0:
while readyForWork.value() < threads:
sleep(0.02)
for i in range(min(TasksPerChunk, blocksToCompress-startChunkBlockID)):
lenResult = len(results[i])
compressedBytes += lenResult
compressedblockSizeList[startChunkBlockID+i] = lenResult
f.write(results[i])
results[i] = b""
if len(buffer) == 0:
break
chunkRelativeBlockID = 0
startChunkBlockID = blockID
work.put([buffer, compressionLevel, compressedblockSizeList, chunkRelativeBlockID])
readyForWork.decrement()
blockID += 1
chunkRelativeBlockID += 1
decompressedBytes += len(buffer)
bar.count = decompressedBytes//1048576
subBars.count = compressedBytes//1048576
bar.refresh()
partitions[partNr].close()
partitions[partNr] = None
endPos = f.tell()
bar.count = decompressedBytes//1048576
subBars.count = compressedBytes//1048576
bar.close()
written = endPos - startPos
f.seek(blocksHeaderFilePos+24)
header = b""
for compressedblockSize in compressedblockSizeList:
header += compressedblockSize.to_bytes(4, 'little')
f.write(header)
f.seek(endPos) #Seek to end of file.
Print.info('compressed %d%% %d -> %d - %s' % (int(written * 100 / nspf.size), decompressedBytes, written, nspf._path))
writeContainer.resize(newFileName, written)
continue
else:
Print.info('Skipping not packed {0}'.format(nspf._path))
f = writeContainer.add(nspf._path, nspf.size)
nspf.seek(0)
while not nspf.eof():
buffer = nspf.read(CHUNK_SZ)
f.write(buffer)
	#Ensures that all threads are started and completed before being requested to quit
while readyForWork.value() < threads:
sleep(0.02)
pleaseKillYourself.increment()
for i in range(readyForWork.value()):
work.put(None)
readyForWork.decrement()
while readyForWork.value() > 0:
sleep(0.02)
def blockCompressNsp(filePath, compressionLevel , blockSizeExponent, outputDir, threads):
filePath = filePath.resolve()
container = factory(filePath)
container.open(str(filePath), 'rb')
nszPath = outputDir.joinpath(filePath.stem + '.nsz')
Print.info('Block compressing (level {0}) {1} -> {2}'.format(compressionLevel, filePath, nszPath))
try:
with Pfs0.Pfs0Stream(str(nszPath)) as nsp:
blockCompressContainer(container, nsp, compressionLevel, blockSizeExponent, threads)
except BaseException as ex:
if not ex is KeyboardInterrupt:
Print.error(format_exc())
if nszPath.is_file():
nszPath.unlink()
container.close()
return nszPath
def blockCompressXci(filePath, compressionLevel, blockSizeExponent, outputDir, threads):
filePath = filePath.resolve()
container = factory(filePath)
container.open(str(filePath), 'rb')
secureIn = container.hfs0['secure']
xczPath = outputDir.joinpath(filePath.stem + '.xcz')
Print.info('Block compressing (level {0}) {1} -> {2}'.format(compressionLevel, filePath, xczPath))
try:
with Xci.XciStream(str(xczPath), originalXciPath = filePath) as xci: # need filepath to copy XCI container settings
with Hfs0.Hfs0Stream(xci.hfs0.add('secure', 0), xci.f.tell()) as secureOut:
blockCompressContainer(secureIn, secureOut, compressionLevel, blockSizeExponent, threads)
xci.hfs0.resize('secure', secureOut.actualSize)
except BaseException as ex:
if not ex is KeyboardInterrupt:
Print.error(format_exc())
if xczPath.is_file():
xczPath.unlink()
container.close()
return xczPath
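# Minimal usage sketch (hypothetical paths; assumes the nsz package and a valid
# keys.txt are set up). Kept as a comment because this module is normally driven
# by the nsz command-line front end rather than run directly:
#
#   from pathlib import Path
#   nszPath = blockCompress(Path('title.nsp'), compressionLevel=18,
#                           blockSizeExponent=20, outputDir=Path('.'), threads=4)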
|
3_5_example.py
|
# -*- coding: utf-8 -*-
"""
This recipe describes how to handle asynchronous I/O in an environment where
you are running Tkinter as the graphical user interface. Tkinter is safe
to use as long as all the graphics commands are handled in a single thread.
Since it is more efficient to make I/O channels block and wait for something
to happen rather than poll at regular intervals, we want I/O to be handled
in separate threads. These can communicate in a thread-safe way with the main,
GUI-oriented process through one or several queues. In this solution the GUI
still has to make a poll at a reasonable interval, to check if there is
something in the queue that needs processing. Other solutions are possible,
but they add a lot of complexity to the application.
Original code created by Jacob Hallén, AB Strakt, Sweden. 2001-10-17
"""
import tkinter
import time
import threading
import random
import queue
class GuiPart:
def __init__(self, master, main_queue, end_command):
self.queue = main_queue
self.master = master
# Set up the GUI
self.console = tkinter.Button(self.master, text='Done', command=end_command)
self.console.pack()
# Add more GUI stuff here
def process_incoming(self):
"""
Handle all the messages currently in the queue (if any).
"""
while self.queue.qsize():
try:
msg = self.queue.get(0)
# Check contents of message and do what it says
# As a test, we simply change the Button text
self.console.configure(text=msg)
# Update the window after changing text on Button
self.master.update_idletasks()
except queue.Empty:
pass
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI. We spawn a new thread for the worker.
"""
self.master = master
# Create the queue
self.queue = queue.Queue()
# Set up the GUI part
self.gui = GuiPart(master, self.queue, self.end_application)
# Set up the thread to do asynchronous I/O
# More can be made if necessary
self.running = True
self.thread1 = threading.Thread(target=self.worker_thread1)
self.thread1.start()
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodic_call()
# If you close the window, stop the thread
self.master.bind('<Destroy>', lambda event: self.end_application())
def periodic_call(self):
"""
Check every 100 ms if there is something new in the queue.
"""
self.gui.process_incoming()
if not self.running:
# This is the brutal stop of the system. You may want to do
# some cleanup before actually shutting it down.
import sys
sys.exit(1)
self.master.after(100, self.periodic_call)
def worker_thread1(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select()'.
One important thing to remember is that the thread has to yield
control.
"""
while self.running:
# To simulate asynchronous I/O, we create a random number at
# random intervals. Replace the following 2 lines with the real
# thing.
time.sleep(rand.random() * 0.3)
msg = rand.random()
self.queue.put(msg)
def end_application(self):
self.running = False
rand = random.Random()
root = tkinter.Tk()
root.minsize(width=200, height=0)
client = ThreadedClient(root)
root.mainloop()
|
framework.py
|
#!/usr/bin/env python
from __future__ import print_function
import gc
import sys
import os
import select
import unittest
import tempfile
import time
import faulthandler
import random
import copy
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from scapy.packet import Raw
from hook import StepHook, PollHook, VppDiedError
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_papi_provider import VppPapiProvider
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
getLogger, colorize
from vpp_object import VppObjectRegistry
from util import ppp
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
debug_framework = False
if os.getenv('TEST_DEBUG', "0") == "1":
debug_framework = True
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.wait(0):
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the flag will take care
# of properly terminating the loop
def running_extended_tests():
s = os.getenv("EXTENDED_TESTS", "n")
return True if s.lower() in ("y", "yes", "1") else False
def running_on_centos():
os_id = os.getenv("OS_ID", "")
return True if "centos" in os_id.lower() else False
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
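    # Borg-style shared state: every instance points its __dict__ at the same
    # class-level dictionary, so a pipe set on one instance is visible to all.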
def __init__(self):
self.__dict__ = self._shared_state
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if hasattr(self, '_pipe'):
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = test.__name__
else:
desc = test.shortDescription()
if not desc:
desc = str(test)
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb":
cls.debug_gdb = True
elif dl == "gdbserver":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
s = os.getenv("STEP", "n")
cls.step = True if s.lower() in ("y", "yes", "1") else False
d = os.getenv("DEBUG", None)
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "}", "api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{",
"disable", "}", "}", ]
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug the VPP using e.g.:")
if cls.debug_gdbserver:
print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
print("Now is the time to attach a gdb by running the above "
"command, set up breakpoints etc. and then resume VPP from "
"within gdb by issuing the 'continue' command")
elif cls.debug_gdb:
print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach a gdb by running the above "
"command and set up breakpoints etc.")
print(single_line_delim)
raw_input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
except Exception as e:
cls.logger.critical("Couldn't start vpp: %s" % e)
raise
cls.wait_for_enter()
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
gc.collect() # run garbage collection first
random.seed()
cls.logger = getLogger(cls.__name__)
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.shm_prefix = cls.tempdir.split("/")[-1]
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end up with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
if cls.step:
hook = StepHook(cls)
else:
hook = PollHook(cls)
cls.vapi.register_hook(hook)
cls.sleep(0.1, "after vpp startup, before initial poll")
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except Exception:
try:
cls.vapi.disconnect()
except Exception:
pass
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except Exception:
try:
cls.quit()
except Exception:
pass
raise
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
raw_input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stdderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.vapi.disconnect()
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.vapi.ppcli("show error"))
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
api_trace = "vpp_api_trace.%s.log" % self._testMethodName
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
self.reporter.send_keep_alive(self)
self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if self.vpp_dead:
raise Exception("VPP is dead when setting up the test")
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable of interfaces to enable capture on (if None,
use cls.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
# filter out from zombies
cls._zombie_captures = [(stamp, name)
for (stamp, name) in cls._zombie_captures
if name != cap_name]
@classmethod
def pg_start(cls):
""" Remove any zombie captures and enable the packet generator """
# how long before capture is allowed to be deleted - otherwise vpp
# crashes - 100ms seems enough (this shouldn't be needed at all)
capture_ttl = 0.1
now = time.time()
for stamp, cap_name in cls._zombie_captures:
wait = stamp + capture_ttl - now
if wait > 0:
cls.sleep(wait, "before deleting capture %s" % cap_name)
now = time.time()
cls.logger.debug("Removing zombie capture %s" % cap_name)
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls.vapi.cli("trace add pg-input 50") # 50 is maximum
cls.vapi.cli('packet-generator enable')
cls._zombie_captures = cls._captures
cls._captures = []
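# Illustrative sketch of the usual traffic flow built on the two helpers above
# (it mirrors what send_and_expect() further below does; self.pg0/self.pg1 are
# example interface names):
#
#   self.pg0.add_stream(pkts)
#   self.pg_enable_capture(self.pg_interfaces)
#   self.pg_start()
#   rx = self.pg1.get_capture(len(pkts))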
@classmethod
def create_pg_interfaces(cls, interfaces):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces to create.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend / len(padding)) + 1
packet[Raw].load += (padding * num)[:extend]
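# Example with made-up numbers: for a 60-byte scapy packet, packet_len above is
# 64 (4 bytes reserved on top of len(packet), presumably for the Ethernet FCS),
# so extending to 100 bytes appends 36 bytes of padding to the Raw payload:
#
#   p = Ether() / IP() / UDP() / Raw('x' * 18)
#   self.extend_packet(p, 100)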
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = payload.split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
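# Round-trip sketch: tests typically embed the serialized info in the packet
# payload when sending and recover it from the capture when verifying
# (self.pg0/self.pg1 and rx_pkt are placeholders):
#
#   info = self.create_packet_info(self.pg0, self.pg1)
#   payload = self.info_to_payload(info)
#   ...
#   recovered = self.payload_to_info(rx_pkt[Raw].load)
#   self.assert_equal(recovered.index, info.index)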
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
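# Verification sketch (interface names are placeholders): walk all recorded
# infos for a source/destination pair and check each expected packet arrived:
#
#   info = None
#   while True:
#       info = self.get_next_packet_info_for_interface2(
#           self.pg0.sw_if_index, self.pg1.sw_if_index, info)
#       if info is None:
#           break
#       # ... match info against the captured packets ...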
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(str(packet))
self.logger.debug(
ppp("Verifying packet checksums for packet:", received))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(str(received))
while True:
layer = temp.getlayer(counter)
if layer:
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(layer, cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(str(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(str(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(str(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
@classmethod
def sleep(cls, timeout, remark=None):
if hasattr(cls, 'logger'):
cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected time.sleep() result - "
"slept for %ss instead of ~%ss!" % (
after - before, timeout))
if hasattr(cls, 'logger'):
cls.logger.debug(
"Finished sleep (%s) - slept %ss (wanted %ss)" % (
remark, after - before, timeout))
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
self.vapi.cli("clear trace")
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, input, pkts, output):
self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = output.get_capture(len(pkts))
return rx
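# Usage sketch (interface names are examples): send a burst on pg0, expect it
# to be forwarded out of pg1, then run additional per-packet checks:
#
#   rx = self.send_and_expect(self.pg0, pkts, self.pg1)
#   for p in rx:
#       self.assert_packet_checksums_valid(p)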
class TestCasePrinter(object):
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
if not hasattr(self, "_test_case_set"):
self._test_case_set = set()
def print_test_case_heading_if_first_time(self, case):
if case.__class__ not in self._test_case_set:
print(double_line_delim)
print(colorize(getdoc(case.__class__).splitlines()[0], YELLOW))
print(double_line_delim)
self._test_case_set.add(case.__class__)
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
def __init__(self, stream, descriptions, verbosity):
"""
:param stream: File descriptor to which test results are reported.
Set to the standard error stream by default.
:param descriptions: Boolean controlling whether to use test case descriptions.
:param verbosity: Integer specifying the required verbosity level.
"""
unittest.TestResult.__init__(self, stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.printer = TestCasePrinter()
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSuccess() %s.%s(%s) called"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc,
reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
def symlink_failed(self, test):
logger = None
if hasattr(test, 'logger'):
logger = test.logger
if hasattr(test, 'tempdir'):
try:
failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
link_path = '%s/%s-FAILED' % (failed_dir,
test.tempdir.split("/")[-1])
if logger:
logger.debug("creating a link to the failed test")
logger.debug("os.symlink(%s, %s)" %
(test.tempdir, link_path))
os.symlink(test.tempdir, link_path)
except Exception as e:
if logger:
logger.error(e)
def send_failure_through_pipe(self, test):
if hasattr(self, 'test_framework_failed_pipe'):
pipe = self.test_framework_failed_pipe
if pipe:
if test.__class__.__name__ == "_ErrorHolder":
x = str(test)
if x.startswith("setUpClass"):
# x looks like setUpClass (test_function.test_class)
cls = x.split(".")[1].split(")")[0]
for t in self.test_suite:
if t.__class__.__name__ == cls:
pipe.send(t.__class__)
break
else:
raise Exception("Can't find class name `%s' "
"(from ErrorHolder) in test suite "
"`%s'" % (cls, self.test_suite))
else:
raise Exception("FIXME: unexpected special case - "
"ErrorHolder description is `%s'" %
str(test))
else:
pipe.send(test.__class__)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addFailure(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("FAIL", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
self.send_failure_through_pipe(test)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addError() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addError(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("ERROR", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
self.send_failure_through_pipe(test)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
# TODO: if none print warning not raise exception
short_description = test.shortDescription()
if self.descriptions and short_description:
return short_description
else:
return str(test)
def startTest(self, test):
"""
Start a test
:param test:
"""
self.printer.print_test_case_heading_if_first_time(test)
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Stop a test
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
def printErrors(self):
"""
Print errors from running the test case
"""
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class Filter_by_test_option:
def __init__(self, filter_file_name, filter_class_name, filter_func_name):
self.filter_file_name = filter_file_name
self.filter_class_name = filter_class_name
self.filter_func_name = filter_func_name
def __call__(self, file_name, class_name, func_name):
if self.filter_file_name and file_name != self.filter_file_name:
return False
if self.filter_class_name and class_name != self.filter_class_name:
return False
if self.filter_func_name and func_name != self.filter_func_name:
return False
return True
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, failed_pipe=None,
stream=sys.stderr, descriptions=True,
verbosity=1, failfast=False, buffer=False, resultclass=None):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass)
reporter = KeepAliveReporter()
reporter.pipe = keep_alive_pipe
# this is super-ugly, but very simple to implement and works as long
# as we run only one test at the same time
VppTestResult.test_framework_failed_pipe = failed_pipe
test_option = "TEST"
def parse_test_option(self):
f = os.getenv(self.test_option, None)
filter_file_name = None
filter_class_name = None
filter_func_name = None
if f:
if '.' in f:
parts = f.split('.')
if len(parts) > 3:
raise Exception("Unrecognized %s option: %s" %
(self.test_option, f))
if len(parts) > 2:
if parts[2] not in ('*', ''):
filter_func_name = parts[2]
if parts[1] not in ('*', ''):
filter_class_name = parts[1]
if parts[0] not in ('*', ''):
if parts[0].startswith('test_'):
filter_file_name = parts[0]
else:
filter_file_name = 'test_%s' % parts[0]
else:
if f.startswith('test_'):
filter_file_name = f
else:
filter_file_name = 'test_%s' % f
return filter_file_name, filter_class_name, filter_func_name
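# Illustrative TEST values accepted by parse_test_option() (module, class and
# function names below are hypothetical):
#
#   TEST=ip4                    -> everything in test_ip4.py
#   TEST=ip4.TestIPv4           -> a single test class
#   TEST=ip4.TestIPv4.test_fib  -> a single test function
#   TEST=ip4.*.test_fib         -> any class in test_ip4.py defining test_fib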
@staticmethod
def filter_tests(tests, filter_cb):
result = unittest.suite.TestSuite()
for t in tests:
if isinstance(t, unittest.suite.TestSuite):
# this is a bunch of tests, recursively filter...
x = VppTestRunner.filter_tests(t, filter_cb)
if x.countTestCases() > 0:
result.addTest(x)
elif isinstance(t, unittest.TestCase):
# this is a single test
parts = t.id().split('.')
# t.id() for common cases like this:
# test_classifier.TestClassifier.test_acl_ip
# apply filtering only if it is so
if len(parts) == 3:
if not filter_cb(parts[0], parts[1], parts[2]):
continue
result.addTest(t)
else:
# unexpected object, don't touch it
result.addTest(t)
return result
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
print("Running tests using custom test runner") # debug message
filter_file, filter_class, filter_func = self.parse_test_option()
print("Active filters: file=%s, class=%s, function=%s" % (
filter_file, filter_class, filter_func))
filter_cb = Filter_by_test_option(
filter_file, filter_class, filter_func)
filtered = self.filter_tests(test, filter_cb)
print("%s out of %s tests match specified filters" % (
filtered.countTestCases(), test.countTestCases()))
if not running_extended_tests():
print("Not running extended tests (some tests will be skipped)")
# super-ugly hack #2
VppTestResult.test_suite = filtered
return super(VppTestRunner, self).run(filtered)
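# Minimal sketch of driving this runner directly (the discovery pattern is an
# assumption; the real test scripts wire this up elsewhere):
#
#   suite = unittest.TestLoader().discover('.', pattern='test_*.py')
#   VppTestRunner(verbosity=2).run(suite)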
class Worker(Thread):
def __init__(self, args, logger, env={}):
self.logger = logger
self.args = args
self.result = None
self.env = copy.deepcopy(env)
super(Worker, self).__init__()
def run(self):
executable = self.args[0]
self.logger.debug("Running executable w/args `%s'" % self.args)
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.process.communicate()
self.logger.debug("Finished running `%s'" % executable)
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stdout:" % executable)
self.logger.info(single_line_delim)
self.logger.info(out)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stderr:" % executable)
self.logger.info(single_line_delim)
self.logger.info(err)
self.logger.info(single_line_delim)
self.result = self.process.returncode
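# Usage sketch for the Worker helper (binary path, arguments and logger name
# are placeholders):
#
#   w = Worker(['/path/to/test_binary', '--verbose'], getLogger('worker'))
#   w.start()
#   w.join(timeout=120)
#   if w.result != 0:
#       raise Exception("external test binary failed")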
task.py
import atexit
import json
import os
import shutil
import signal
import sys
import threading
import time
from argparse import ArgumentParser
from logging import getLogger
from operator import attrgetter
from tempfile import mkstemp, mkdtemp
from zipfile import ZipFile, ZIP_DEFLATED
try:
# noinspection PyCompatibility
from collections.abc import Sequence as CollectionsSequence
except ImportError:
from collections import Sequence as CollectionsSequence
from typing import Optional, Union, Mapping, Sequence, Any, Dict, Iterable, TYPE_CHECKING, Callable
import psutil
import six
from pathlib2 import Path
from .backend_config.defs import get_active_config_file, get_config_file
from .backend_api.services import tasks, projects, queues
from .backend_api.session.session import (
Session, ENV_ACCESS_KEY, ENV_SECRET_KEY, ENV_HOST, ENV_WEB_HOST, ENV_FILES_HOST)
from .backend_interface.metrics import Metrics
from .backend_interface.model import Model as BackendModel
from .backend_interface.task import Task as _Task
from .backend_interface.task.log import TaskHandler
from .backend_interface.task.development.worker import DevWorker
from .backend_interface.task.repo import ScriptInfo
from .backend_interface.task.models import TaskModels
from .backend_interface.util import get_single_result, exact_match_regex, make_message, mutually_exclusive
from .binding.absl_bind import PatchAbsl
from .binding.artifacts import Artifacts, Artifact
from .binding.environ_bind import EnvironmentBind, PatchOsFork
from .binding.frameworks.fastai_bind import PatchFastai
from .binding.frameworks.lightgbm_bind import PatchLIGHTgbmModelIO
from .binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from .binding.frameworks.tensorflow_bind import TensorflowBinding
from .binding.frameworks.xgboost_bind import PatchXGBoostModelIO
from .binding.joblib_bind import PatchedJoblib
from .binding.matplotlib_bind import PatchedMatplotlib
from .binding.hydra_bind import PatchHydra
from .config import (
config, DEV_TASK_NO_REUSE, get_is_master_node, DEBUG_SIMULATE_REMOTE_TASK, PROC_MASTER_ID_ENV_VAR,
DEV_DEFAULT_OUTPUT_URI, )
from .config import running_remotely, get_remote_task_id
from .config.cache import SessionCache
from .debugging.log import LoggerRoot
from .errors import UsageError
from .logger import Logger
from .model import Model, InputModel, OutputModel
from .task_parameters import TaskParameters
from .utilities.config import verify_basic_value
from .utilities.args import argparser_parseargs_called, get_argparser_last_args, \
argparser_update_currenttask
from .utilities.dicts import ReadOnlyDict, merge_dicts
from .utilities.proxy_object import ProxyDictPreWrite, ProxyDictPostWrite, flatten_dictionary, \
nested_from_flat_dictionary, naive_nested_from_flat_dictionary
from .utilities.resource_monitor import ResourceMonitor
from .utilities.seed import make_deterministic
from .utilities.lowlevel.threads import get_current_thread_id
from .utilities.process.mp import BackgroundMonitor, leave_process
# noinspection PyProtectedMember
from .backend_interface.task.args import _Arguments
if TYPE_CHECKING:
import pandas
import numpy
from PIL import Image
class Task(_Task):
"""
The ``Task`` class is a code template for a Task object which, together with its connected experiment components,
represents the current running experiment. These connected components include hyperparameters, loggers,
configuration, label enumeration, models, and other artifacts.
The term "main execution Task" refers to the Task context for current running experiment. Python experiment scripts
can create one, and only one, main execution Task. It is a traceable, and after a script runs and ClearML stores
the Task in the **ClearML Server** (backend), it is modifiable, reproducible, executable by a worker, and you
can duplicate it for further experimentation.
The ``Task`` class and its methods allow you to create and manage experiments, as well as perform
advanced experimentation functions, such as autoML.
.. warning::
Do not construct Task objects directly. Use one of the methods listed below to create experiments or
reference existing experiments.
For detailed information about creating Task objects, see the following methods:
- Create a new reproducible Task - :meth:`Task.init`
.. important::
In some cases, ``Task.init`` may return a Task object which is already stored in **ClearML Server** (already
initialized), instead of creating a new Task. For a detailed explanation of those cases, see the ``Task.init``
method.
- Manually create a new Task (no auto-logging will apply) - :meth:`Task.create`
- Get the current running Task - :meth:`Task.current_task`
- Get another (different) Task - :meth:`Task.get_task`
.. note::
The **ClearML** documentation often refers to a Task as, "Task (experiment)".
"Task" refers to the class in the ClearML Python Client Package, the object in your Python experiment script,
and the entity with which **ClearML Server** and **ClearML Agent** work.
"Experiment" refers to your deep learning solution, including its connected components, inputs, and outputs,
and is the experiment you can view, analyze, compare, modify, duplicate, and manage using the ClearML
**Web-App** (UI).
Therefore, a "Task" is effectively an "experiment", and "Task (experiment)" encompasses its usage throughout
the ClearML.
The exception to this Task behavior is sub-tasks (non-reproducible Tasks), which do not use the main execution
Task. Creating a sub-task always creates a new Task with a new Task ID.
"""
TaskTypes = _Task.TaskTypes
NotSet = object()
__create_protection = object()
__main_task = None # type: Optional[Task]
__exit_hook = None
__forked_proc_main_pid = None
__task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0))
__detect_repo_async = config.get('development.vcs_repo_detect_async', False)
__default_output_uri = DEV_DEFAULT_OUTPUT_URI.get() or config.get('development.default_output_uri', None)
class _ConnectedParametersType(object):
argparse = "argument_parser"
dictionary = "dictionary"
task_parameters = "task_parameters"
@classmethod
def _options(cls):
return {
var for var, val in vars(cls).items()
if isinstance(val, six.string_types)
}
def __init__(self, private=None, **kwargs):
"""
.. warning::
**Do not construct Task manually!**
Please use :meth:`Task.init` or :meth:`Task.get_task`
"""
if private is not Task.__create_protection:
raise UsageError(
'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)')
self._repo_detect_lock = threading.RLock()
super(Task, self).__init__(**kwargs)
self._arguments = _Arguments(self)
self._logger = None
self._connected_output_model = None
self._dev_worker = None
self._connected_parameter_type = None
self._detect_repo_async_thread = None
self._resource_monitor = None
self._calling_filename = None
self._remote_functions_generated = {}
# register atexit, so that we mark the task as stopped
self._at_exit_called = False
@classmethod
def current_task(cls):
# type: () -> Task
"""
Get the current running Task (experiment). This is the main execution Task (task context) returned as a Task
object.
:return: The current running Task (experiment).
"""
# check if we have no main Task, but the main process created one.
if not cls.__main_task and PROC_MASTER_ID_ENV_VAR.get():
# initialize the Task, connect to stdout
Task.init()
# return main Task
return cls.__main_task
@classmethod
def init(
cls,
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
task_type=TaskTypes.training, # type: Task.TaskTypes
tags=None, # type: Optional[Sequence[str]]
reuse_last_task_id=True, # type: Union[bool, str]
continue_last_task=False, # type: Union[bool, str]
output_uri=None, # type: Optional[Union[str, bool]]
auto_connect_arg_parser=True, # type: Union[bool, Mapping[str, bool]]
auto_connect_frameworks=True, # type: Union[bool, Mapping[str, bool]]
auto_resource_monitoring=True, # type: bool
auto_connect_streams=True, # type: Union[bool, Mapping[str, bool]]
):
# type: (...) -> Task
"""
Creates a new Task (experiment) if:
- The Task never ran before. No Task with the same ``task_name`` and ``project_name`` is stored in
**ClearML Server**.
- The Task has run before (the same ``task_name`` and ``project_name``), and (a) it stored models and / or
artifacts, or (b) its status is Published, or (c) it is Archived.
- A new Task is forced by calling ``Task.init`` with ``reuse_last_task_id=False``.
Otherwise, the already initialized Task object for the same ``task_name`` and ``project_name`` is returned.
.. note::
To reference another Task, instead of initializing the same Task more than once, call
:meth:`Task.get_task`. For example, to "share" the same experiment in more than one script,
call ``Task.get_task``. See the ``Task.get_task`` method for an example.
For example:
The first time the following code runs, it will create a new Task. The status will be Completed.
.. code-block:: py
from clearml import Task
task = Task.init('myProject', 'myTask')
If this code runs again, it will not create a new Task. It does not store a model or artifact,
it is not Published (its status is Completed), it was not Archived, and a new Task is not forced.
If the Task is Published or Archived, and run again, it will create a new Task with a new Task ID.
The following code will create a new Task every time it runs, because it stores an artifact.
.. code-block:: py
task = Task.init('myProject', 'myOtherTask')
d = {'a': '1'}
task.upload_artifact('myArtifact', d)
:param str project_name: The name of the project in which the experiment will be created. If the project does
not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional)
:param str task_name: The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment
script's file name is used. (Optional)
:param TaskTypes task_type: The task type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:param tags: Add a list of tags (str) to the created Task. For example: tags=['512x512', 'yolov3']
:param bool reuse_last_task_id: Force a new Task (experiment) with a previously used Task ID,
and the same project and Task name.
.. note::
If the previously executed Task has artifacts or models, it will not be reused (overwritten)
and a new Task will be created.
When a Task is reused, the previous execution outputs are deleted, including console outputs and logs.
The values are:
- ``True`` - Reuse the last Task ID. (default)
- ``False`` - Force a new Task (experiment).
- A string - You can also specify a Task ID (string) to be reused,
instead of the cached ID based on the project/name combination.
:param bool continue_last_task: Continue the execution of a previously executed Task (experiment)
.. note::
When continuing the executing of a previously executed Task,
all previous artifacts / models/ logs are intact.
New logs will continue iteration/step based on the previous-execution maximum iteration value.
For example:
The last train/loss scalar reported was iteration 100, the next report will be iteration 101.
The values are:
- ``True`` - Continue the last Task ID.
specified explicitly by reuse_last_task_id or implicitly with the same logic as reuse_last_task_id
- ``False`` - Overwrite the execution of previous Task (default).
- A string - You can also specify a Task ID (string) to be continued.
This is equivalent to `continue_last_task=True` and `reuse_last_task_id=a_task_id_string`.
:param str output_uri: The default location for output models and other artifacts.
If True is passed, the default files_server will be used for model storage.
In the default location, ClearML creates a subfolder for the output.
The subfolder structure is the following:
<output destination name> / <project name> / <task name>.<Task ID>
The following are examples of ``output_uri`` values for the supported locations:
- A shared folder: ``/mnt/share/folder``
- S3: ``s3://bucket/folder``
- Google Cloud Storage: ``gs://bucket-name/folder``
- Azure Storage: ``azure://company.blob.core.windows.net/folder/``
- Default file server: True
.. important::
For cloud storage, you must install the **ClearML** package for your cloud storage type,
and then configure your storage credentials. For detailed information, see
`ClearML Python Client Extras <./references/clearml_extras_storage/>`_ in the "ClearML Python Client
Reference" section.
:param auto_connect_arg_parser: Automatically connect an argparse object to the Task
The values are:
- ``True`` - Automatically connect. (default)
- ``False`` - Do not automatically connect.
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
arguments. The dictionary keys are argparse variable names and the values are booleans.
The ``False`` value excludes the specified argument from the Task's parameter section.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_arg_parser={'do_not_include_me': False, }
.. note::
To manually connect an argparse, use :meth:`Task.connect`.
:param auto_connect_frameworks: Automatically connect frameworks This includes patching MatplotLib, XGBoost,
scikit-learn, Keras callbacks, and TensorBoard/X to serialize plots, graphs, and the model location to
the **ClearML Server** (backend), in addition to original output destination.
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
frameworks. The dictionary keys are frameworks and the values are booleans.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_frameworks={'matplotlib': True, 'tensorflow': True, 'tensorboard': True, 'pytorch': True,
'xgboost': True, 'scikit': True, 'fastai': True, 'lightgbm': True, 'hydra': True}
:param bool auto_resource_monitoring: Automatically create machine resource monitoring plots
These plots appear in the **ClearML Web-App (UI)**, **RESULTS** tab, **SCALARS** sub-tab,
with a title of **:resource monitor:**.
The values are:
- ``True`` - Automatically create resource monitoring plots. (default)
- ``False`` - Do not automatically create.
- Class Type - Create ResourceMonitor object of the specified class type.
:param auto_connect_streams: Control the automatic logging of stdout and stderr
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of stdout and
stderr. The dictionary keys are 'stdout' , 'stderr' and 'logging', the values are booleans.
Keys missing from the dictionary default to ``False``, and an empty dictionary defaults to ``False``.
Notice: the default behaviour is to log stdout/stderr; the
`logging` module is logged as a by-product of the stderr logging.
For example:
.. code-block:: py
auto_connect_streams={'stdout': True, 'stderr': True, 'logging': False}
:return: The main execution Task (Task context).
"""
def verify_defaults_match():
validate = [
('project name', project_name, cls.__main_task.get_project_name()),
('task name', task_name, cls.__main_task.name),
('task type', str(task_type) if task_type else task_type, str(cls.__main_task.task_type)),
]
for field, default, current in validate:
if default is not None and default != current:
raise UsageError(
"Current task already created "
"and requested {field} '{default}' does not match current {field} '{current}'. "
"If you wish to create additional tasks use `Task.create`".format(
field=field,
default=default,
current=current,
)
)
if cls.__main_task is not None:
# if this is a subprocess, regardless of what the init was called for,
# we have to fix the main task hooks and stdout bindings
if cls.__forked_proc_main_pid != os.getpid() and cls.__is_subprocess():
if task_type is None:
task_type = cls.__main_task.task_type
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure we do not wait for the repo detect thread
cls.__main_task._detect_repo_async_thread = None
cls.__main_task._dev_worker = None
cls.__main_task._resource_monitor = None
# remove the logger from the previous process
cls.__main_task.get_logger()
# create a new logger (to catch stdout/err)
cls.__main_task._logger = None
cls.__main_task.__reporter = None
# noinspection PyProtectedMember
cls.__main_task._get_logger(auto_connect_streams=auto_connect_streams)
cls.__main_task._artifacts_manager = Artifacts(cls.__main_task)
# unregister signal hooks, they cause subprocess to hang
# noinspection PyProtectedMember
cls.__main_task.__register_at_exit(cls.__main_task._at_exit)
# TODO: Check if the signal handler method is safe enough, for the time being, do not unhook
# cls.__main_task.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
# start all reporting threads
BackgroundMonitor.start_all(task=cls.__main_task)
if not running_remotely():
verify_defaults_match()
return cls.__main_task
is_sub_process_task_id = None
# check that we are not a child process, in that case do nothing.
# we should not get here unless this is Windows/macOS platform, linux support fork
if cls.__is_subprocess():
class _TaskStub(object):
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, attr):
return self
def __setattr__(self, attr, val):
pass
is_sub_process_task_id = cls.__get_master_id_task_id()
# we could not find a task ID, revert to old stub behaviour
if not is_sub_process_task_id:
return _TaskStub()
elif running_remotely() and not get_is_master_node():
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure everyone understands we should act as if we are a subprocess (fake pid 1)
cls.__update_master_pid_task(pid=1, task=get_remote_task_id())
else:
# set us as master process (without task ID)
cls.__update_master_pid_task()
is_sub_process_task_id = None
if task_type is None:
# Backwards compatibility: if called from Task.current_task and task_type
# was not specified, keep legacy default value of TaskTypes.training
task_type = cls.TaskTypes.training
elif isinstance(task_type, six.string_types):
if task_type not in Task.TaskTypes.__members__:
raise ValueError("Task type '{}' not supported, options are: {}".format(
task_type, Task.TaskTypes.__members__.keys()))
task_type = Task.TaskTypes.__members__[str(task_type)]
try:
if not running_remotely():
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls._create_dev_task(
default_project_name=project_name,
default_task_name=task_name,
default_task_type=task_type,
tags=tags,
reuse_last_task_id=reuse_last_task_id,
continue_last_task=continue_last_task,
detect_repo=False if (
isinstance(auto_connect_frameworks, dict) and
not auto_connect_frameworks.get('detect_repository', True)) else True,
auto_connect_streams=auto_connect_streams,
)
# set defaults
if cls._offline_mode:
task.output_uri = None
elif output_uri:
task.output_uri = output_uri
elif cls.__default_output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
else:
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls(
private=cls.__create_protection,
task_id=get_remote_task_id(),
log_to_backend=False,
)
if cls.__default_output_uri and not task.output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
# make sure we are started
task.started(ignore_errors=True)
# continue last iteration if we had any
if task.data.last_iteration:
task.set_initial_iteration(int(task.data.last_iteration) + 1)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
except Exception:
raise
else:
Task.__main_task = task
# register the main task for at exit hooks (there should only be one)
task.__register_at_exit(task._at_exit)
# patch OS forking if we are not logging with a subprocess
if not cls._report_subprocess_enabled:
PatchOsFork.patch_fork()
if auto_connect_frameworks:
is_auto_connect_frameworks_bool = not isinstance(auto_connect_frameworks, dict)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('hydra', True):
PatchHydra.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('scikit', True):
PatchedJoblib.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('matplotlib', True):
PatchedMatplotlib.update_current_task(Task.__main_task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('tensorflow', True) \
or auto_connect_frameworks.get('tensorboard', True):
PatchAbsl.update_current_task(Task.__main_task)
TensorflowBinding.update_current_task(
task,
patch_reporting=(is_auto_connect_frameworks_bool
or auto_connect_frameworks.get('tensorboard', True)),
patch_model_io=(is_auto_connect_frameworks_bool
or auto_connect_frameworks.get('tensorflow', True)),
)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('pytorch', True):
PatchPyTorchModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('xgboost', True):
PatchXGBoostModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('fastai', True):
PatchFastai.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('lightgbm', True):
PatchLIGHTgbmModelIO.update_current_task(task)
if auto_resource_monitoring and not is_sub_process_task_id:
resource_monitor_cls = auto_resource_monitoring \
if isinstance(auto_resource_monitoring, six.class_types) else ResourceMonitor
task._resource_monitor = resource_monitor_cls(
task, report_mem_used_per_process=not config.get(
'development.worker.report_global_mem_used', False))
task._resource_monitor.start()
# make sure all random generators are initialized with new seed
make_deterministic(task.get_random_seed())
if auto_connect_arg_parser:
EnvironmentBind.update_current_task(Task.__main_task)
# Patch ArgParser to be aware of the current task
argparser_update_currenttask(Task.__main_task)
# set excluded arguments
if isinstance(auto_connect_arg_parser, dict):
task._arguments.exclude_parser_args(auto_connect_arg_parser)
# Check if parse args already called. If so, sync task parameters with parser
if argparser_parseargs_called():
for parser, parsed_args in get_argparser_last_args():
task._connect_argparse(parser=parser, parsed_args=parsed_args)
elif argparser_parseargs_called():
# actually we have nothing to do; in remote running, the argparser will ignore
# all non-argparser parameters. The only caveat is a parameter connected with the
# same name as an argparser argument; this will be solved once sections are introduced to parameters
pass
# Make sure we start the logger, it will patch the main logging object and pipe all output
# if we are running locally and using development mode worker, we will pipe all stdout to logger.
# The logger will automatically take care of all patching (we just need to make sure to initialize it)
logger = task._get_logger(auto_connect_streams=auto_connect_streams)
# show the debug metrics page in the log, it is very convenient
if not is_sub_process_task_id:
if cls._offline_mode:
logger.report_text('ClearML running in offline mode, session stored in {}'.format(
task.get_offline_mode_folder()))
else:
logger.report_text('ClearML results page: {}'.format(task.get_output_log_web_page()))
# Make sure we start the dev worker if required, otherwise it will only be started when we write
# something to the log.
task._dev_mode_setup_worker()
if (not task._reporter or not task._reporter.is_alive()) and \
is_sub_process_task_id and not cls._report_subprocess_enabled:
task._setup_reporter()
# start monitoring in background process or background threads
# monitoring are: Resource monitoring and Dev Worker monitoring classes
BackgroundMonitor.start_all(task=task)
return task
@classmethod
def create(
cls,
project_name=None, # Optional[str]
task_name=None, # Optional[str]
task_type=None, # Optional[str]
repo=None, # Optional[str]
branch=None, # Optional[str]
commit=None, # Optional[str]
script=None, # Optional[str]
working_directory=None, # Optional[str]
packages=None, # Optional[Union[bool, Sequence[str]]]
requirements_file=None, # Optional[Union[str, Path]]
docker=None, # Optional[str]
docker_args=None, # Optional[str]
docker_bash_setup_script=None, # Optional[str]
argparse_args=None, # Optional[Sequence[Tuple[str, str]]]
base_task_id=None, # Optional[str]
add_task_init_call=True, # bool
):
# type: (...) -> Task
"""
Manually create and populate a new Task (experiment) in the system.
If the code does not already contain a call to ``Task.init``, pass add_task_init_call=True,
and the code will be patched in remote execution (i.e. when executed by `clearml-agent`).
.. note::
This method **always** creates a new Task.
Use :meth:`Task.init` method to automatically create and populate task for the running process.
To reference an existing Task, call the :meth:`Task.get_task` method .
:param project_name: Set the project name for the task. Required if base_task_id is None.
:param task_name: Set the name of the remote task. Required if base_task_id is None.
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param repo: Remote URL for the repository to use, or path to local copy of the git repository
Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'
:param branch: Select specific repository branch/tag (implies the latest commit from the branch)
:param commit: Select specific commit id to use (default: latest commit,
or when used with local repository matching the local commit id)
:param script: Specify the entry point script for the remote execution. When used in tandem with
remote git repository the script should be a relative path inside the repository,
for example: './source/train.py' . When used with local repository path it supports a
direct path to a file inside the local repository itself, for example: '~/project/source/train.py'
:param working_directory: Working directory to launch the script from. Default: repository root folder.
Relative to repo root or local folder.
:param packages: Manually specify a list of required packages. Example: ["tqdm>=2.1", "scikit-learn"]
or `True` to automatically create requirements
based on locally installed packages (repository must be local).
:param requirements_file: Specify requirements.txt file to install when setting the session.
If not provided, the requirements.txt from the repository will be used.
:param docker: Select the docker image to be executed in by the remote session
:param docker_args: Add docker arguments, pass a single string
:param docker_bash_setup_script: Add bash script to be executed
inside the docker before setting up the Task's environment
:param argparse_args: Arguments to pass to the remote execution, list of string pairs (argument, value)
Notice, only supported if the codebase itself uses argparse.ArgumentParser
:param base_task_id: Use a pre-existing task in the system, instead of a local repo/script.
Essentially clones an existing task and overrides arguments/requirements.
:param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution.
:return: The newly created Task (experiment)
"""
if not project_name and not base_task_id:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
from .backend_interface.task.populate import CreateAndPopulate
manual_populate = CreateAndPopulate(
project_name=project_name, task_name=task_name, task_type=task_type,
repo=repo, branch=branch, commit=commit,
script=script, working_directory=working_directory,
packages=packages, requirements_file=requirements_file,
docker=docker, docker_args=docker_args, docker_bash_setup_script=docker_bash_setup_script,
base_task_id=base_task_id,
add_task_init_call=add_task_init_call,
raise_on_missing_entries=False,
)
task = manual_populate.create_task()
if task and argparse_args:
manual_populate.update_task_args(argparse_args)
task.reload()
return task
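# Usage sketch (the script path and package list are made-up examples, not part
# of the ClearML docs; the repository URL is taken from the docstring above):
#
#   task = Task.create(
#       project_name='examples', task_name='remote run',
#       repo='https://github.com/allegroai/clearml.git',
#       script='examples/reporting/scalar_reporting.py',
#       packages=['clearml'],
#   )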
@classmethod
def get_task(cls, task_id=None, project_name=None, task_name=None, allow_archived=True, task_filter=None):
# type: (Optional[str], Optional[str], Optional[str], bool, Optional[dict]) -> Task
"""
Get a Task by Id, or project name / task name combination.
For example:
The following code demonstrates calling ``Task.get_task`` to report a scalar to another Task. The output
of :meth:`.Logger.report_scalar` from testing is associated with the Task named ``training``. It allows
training and testing to run concurrently, because they initialized different Tasks (see :meth:`Task.init`
for information about initializing Tasks).
The training script:
.. code-block:: py
# initialize the training Task
task = Task.init('myProject', 'training')
# do some training
The testing script:
.. code-block:: py
# initialize the testing Task
task = Task.init('myProject', 'testing')
# get the training Task
train_task = Task.get_task(project_name='myProject', task_name='training')
# report metrics in the training Task
for x in range(10):
train_task.get_logger().report_scalar('title', 'series', value=x * 2, iteration=x)
:param str task_id: The Id (system UUID) of the experiment to get.
If specified, ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Task to get.
:param str task_name: The name of the Task within ``project_name`` to get.
:param bool allow_archived: Only applicable if *not* using a specific ``task_id``.
If True (default), archived Tasks may be returned; if False, archived Tasks are filtered out.
:param dict task_filter: Only applicable if *not* using a specific ``task_id``.
Pass additional query filters, on top of project/name. See details in Task.get_tasks.
:return: The Task specified by ID, or project name / experiment name combination.
"""
return cls.__get_task(
task_id=task_id, project_name=project_name, task_name=task_name,
include_archived=allow_archived, task_filter=task_filter,
)
@classmethod
def get_tasks(cls, task_ids=None, project_name=None, task_name=None, task_filter=None):
# type: (Optional[Sequence[str]], Optional[str], Optional[str], Optional[Dict]) -> Sequence[Task]
"""
Get a list of Tasks by one of the following:
- A list of specific Task IDs.
- All Tasks in a project matching a full or partial Task name.
- All Tasks in any project matching a full or partial Task name.
:param list(str) task_ids: The Ids (system UUID) of experiments to get.
If ``task_ids`` specified, then ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Tasks to get. To get Tasks
in all projects, use the default value of ``None``. (Optional)
:param str task_name: The full name or partial name of the Tasks to match within the specified
``project_name`` (or all projects if ``project_name`` is ``None``).
This method supports regular expressions for name matching. If ``None`` is passed,
all Tasks within the project are returned. (Optional)
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
:return: The Tasks specified by the parameter combinations (see the parameters).
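For example, an illustrative sketch (the project and Task names are placeholders):
.. code-block:: py
# get all Tasks in project 'examples' whose name contains 'training'
task_list = Task.get_tasks(project_name='examples', task_name='training')
for t in task_list:
    print(t.name, t.id)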
"""
return cls.__get_tasks(task_ids=task_ids, project_name=project_name,
task_name=task_name, **(task_filter or {}))
@property
def output_uri(self):
# type: () -> str
return self.storage_uri
@output_uri.setter
def output_uri(self, value):
# type: (Union[str, bool]) -> None
# check if this is boolean
if value is False:
value = None
elif value is True:
value = self.__default_output_uri or self._get_default_report_storage_uri()
# check if we have the correct packages / configuration
if value and value != self.storage_uri:
from .storage.helper import StorageHelper
helper = StorageHelper.get(value)
if not helper:
raise ValueError("Could not get access credentials for '{}' "
", check configuration file ~/clearml.conf".format(value))
helper.check_write_permissions(value)
self.storage_uri = value
@property
def artifacts(self):
# type: () -> Dict[str, Artifact]
"""
A read-only dictionary of Task artifacts (name, artifact).
:return: The artifacts.
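For example, an illustrative sketch (the artifact name 'train_data' is a placeholder):
.. code-block:: py
# retrieve a local copy of an uploaded artifact by name
local_copy = task.artifacts['train_data'].get_local_copy()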
"""
if not Session.check_min_api_version('2.3'):
return ReadOnlyDict()
artifacts_pairs = []
if self.data.execution and self.data.execution.artifacts:
artifacts_pairs = [(a.key, Artifact(a)) for a in self.data.execution.artifacts]
if self._artifacts_manager:
artifacts_pairs += list(self._artifacts_manager.registered_artifacts.items())
return ReadOnlyDict(artifacts_pairs)
@property
def models(self):
# type: () -> Mapping[str, Sequence[Model]]
"""
Read-only dictionary of the Task's loaded/stored models.
:return: A dictionary-like object with "input"/"output" keys and input/output properties, pointing to a
list-like object containing Model objects. Each list-like object also acts as a dictionary, mapping
model name to an appropriate model instance.
Get input/output models:
.. code-block:: py
task.models.input
task.models["input"]
task.models.output
task.models["output"]
Get the last output model:
.. code-block:: py
task.models.output[-1]
Get a model by name:
.. code-block:: py
task.models.output["model name"]
"""
return self.get_models()
@property
def logger(self):
# type: () -> Logger
"""
Get a Logger object for reporting, for this task context. You can view all Logger report output associated with
the Task for which this method is called, including metrics, plots, text, tables, and images, in the
**ClearML Web-App (UI)**.
:return: The Logger object for the current Task (experiment).
"""
return self.get_logger()
@classmethod
def clone(
cls,
source_task=None, # type: Optional[Union[Task, str]]
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
):
# type: (...) -> Task
"""
Create a duplicate (a clone) of a Task (experiment). The status of the cloned Task is ``Draft``
and modifiable.
Use this method to manage experiments and for autoML.
:param str source_task: The Task to clone. Specify a Task object or a Task ID. (Optional)
:param str name: The name of the new cloned Task. (Optional)
:param str comment: A comment / description for the new cloned Task. (Optional)
:param str parent: The Id of the parent Task of the new Task.
- If ``parent`` is not specified, then ``parent`` is set to ``source_task.parent``.
- If ``parent`` is not specified and ``source_task.parent`` is not available, then
``parent`` is set to ``source_task``.
:param str project: The Id of the project in which to create the new Task.
If ``None``, the new task inherits the original Task's project. (Optional)
:return: The new cloned Task (experiment).
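For example, an illustrative sketch (the project and Task names are placeholders):
.. code-block:: py
# clone an existing experiment and give the copy a new name
source = Task.get_task(project_name='examples', task_name='training')
cloned = Task.clone(source_task=source, name='training (cloned)')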
"""
assert isinstance(source_task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("ClearML-server does not support DevOps features, "
"upgrade clearml-server to 0.12.0 or above")
task_id = source_task if isinstance(source_task, six.string_types) else source_task.id
if not parent:
if isinstance(source_task, six.string_types):
source_task = cls.get_task(task_id=source_task)
parent = source_task.id if not source_task.parent else source_task.parent
elif isinstance(parent, Task):
parent = parent.id
cloned_task_id = cls._clone_task(cloned_task_id=task_id, name=name, comment=comment,
parent=parent, project=project)
cloned_task = cls.get_task(task_id=cloned_task_id)
return cloned_task
@classmethod
def enqueue(cls, task, queue_name=None, queue_id=None):
# type: (Union[Task, str], Optional[str], Optional[str]) -> Any
"""
Enqueue a Task for execution, by adding it to an execution queue.
.. note::
A worker daemon must be listening at the queue for the worker to fetch the Task and execute it,
see `Use Case Examples <../clearml_agent_ref/#use-case-examples>`_ on the "ClearML Agent
Reference" page.
:param Task/str task: The Task to enqueue. Specify a Task object or Task ID.
:param str queue_name: The name of the queue. If not specified, then ``queue_id`` must be specified.
:param str queue_id: The Id of the queue. If not specified, then ``queue_name`` must be specified.
:return: An enqueue JSON response.
.. code-block:: javascript
{
"queued": 1,
"updated": 1,
"fields": {
"status": "queued",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T15:05:35.426770+00:00",
"last_update": "2020-02-24T15:05:35.426770+00:00",
"execution.queue": "2bd96ab2d9e54b578cc2fb195e52c7cf"
}
}
- ``queued`` - The number of Tasks enqueued (an integer or ``null``).
- ``updated`` - The number of Tasks updated (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time (ISO 8601 format).
- ``last_update`` - The last Task update time, including Task creation, update, change, or events for
this task (ISO 8601 format).
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
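For example, an illustrative sketch (the queue name 'default' is a placeholder):
.. code-block:: py
# clone an existing Task and send the clone to an execution queue
source = Task.get_task(project_name='examples', task_name='training')
draft = Task.clone(source_task=source)
response = Task.enqueue(draft, queue_name='default')
print(response)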
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("ClearML-server does not support DevOps features, "
"upgrade clearml-server to 0.12.0 or above")
# make sure we have either a queue name or a queue id
mutually_exclusive(queue_name=queue_name, queue_id=queue_id)
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
if not queue_id:
req = queues.GetAllRequest(name=exact_match_regex(queue_name), only_fields=["id"])
res = cls._send(session=session, req=req)
if not res.response.queues:
raise ValueError('Could not find queue named "{}"'.format(queue_name))
queue_id = res.response.queues[0].id
if len(res.response.queues) > 1:
LoggerRoot.get_base_logger().info("Multiple queues with name={}, selecting queue id={}".format(
queue_name, queue_id))
req = tasks.EnqueueRequest(task=task_id, queue=queue_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
@classmethod
def dequeue(cls, task):
# type: (Union[Task, str]) -> Any
"""
Dequeue (remove) a Task from an execution queue.
:param Task/str task: The Task to dequeue. Specify a Task object or Task ID.
:return: A dequeue JSON response.
.. code-block:: javascript
{
"dequeued": 1,
"updated": 1,
"fields": {
"status": "created",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T16:43:43.057320+00:00",
"last_update": "2020-02-24T16:43:43.057320+00:00",
"execution.queue": null
}
}
- ``dequeued`` - The number of Tasks dequeued (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time in ISO 8601 format.
- ``last_update`` - The last time the Task was created, updated,
changed or events for this task were reported.
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
- ``updated`` - The number of Tasks updated (an integer or ``null``).
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("ClearML-server does not support DevOps features, "
"upgrade clearml-server to 0.12.0 or above")
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
req = tasks.DequeueRequest(task=task_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
def add_tags(self, tags):
# type: (Union[Sequence[str], str]) -> None
"""
Add Tags to this task. Old tags are not deleted. When executing a Task (experiment) remotely,
this method has no effect.
:param tags: A list of tags to add, which describe the Task.
"""
if isinstance(tags, six.string_types):
tags = tags.split(" ")
self.data.tags = list(set((self.data.tags or []) + tags))
self._edit(tags=self.data.tags)
def connect(self, mutable, name=None):
# type: (Any, Optional[str]) -> Any
"""
Connect an object to a Task object. This connects an experiment component (part of an experiment) to the
experiment. For example, connect hyperparameters or models.
:param object mutable: The experiment component to connect. The object can be any object Task supports
integrating, including:
- argparse - An argparse object for parameters.
- dict - A dictionary for parameters.
- TaskParameters - A TaskParameters object.
- Model - A model object for initial model warmup, or for model update/snapshot uploading.
- Class type - A Class type, storing all class properties (excluding '_' prefix properties)
- Object - A class instance, storing all instance properties (excluding '_' prefix properties)
:param str name: A section name associated with the connected object. Default: 'General'
Currently only supported for `dict` / `TaskParameter` objects
Examples:
name='General' will put the connected dictionary under the General section in the hyper-parameters
name='Train' will put the connected dictionary under the Train section in the hyper-parameters
:return: The result returned when connecting the object, if supported.
:raise: Raise an exception on unsupported objects.
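For example, an illustrative sketch (the parameter names and values are placeholders):
.. code-block:: py
params = {'batch_size': 64, 'learning_rate': 0.001}
# stored under the 'Train' hyper-parameter section;
# when executed remotely, the values are overridden by the backend
params = task.connect(params, name='Train')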
"""
# dispatching by match order
dispatch = (
(OutputModel, self._connect_output_model),
(InputModel, self._connect_input_model),
(ArgumentParser, self._connect_argparse),
(dict, self._connect_dictionary),
(TaskParameters, self._connect_task_parameters),
(type, self._connect_object),
(object, self._connect_object),
)
multi_config_support = Session.check_min_api_version('2.9')
if multi_config_support and not name and not isinstance(mutable, (OutputModel, InputModel)):
name = self._default_configuration_section_name
if not multi_config_support and name and name != self._default_configuration_section_name:
raise ValueError("Multiple configurations is not supported with the current 'clearml-server', "
"please upgrade to the latest version")
for mutable_type, method in dispatch:
if isinstance(mutable, mutable_type):
return method(mutable, name=name)
raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__)
def connect_configuration(self, configuration, name=None, description=None):
# type: (Union[Mapping, Path, str], Optional[str], Optional[str]) -> Union[dict, Path, str]
"""
Connect a configuration dictionary or configuration file (pathlib.Path / str) to a Task object.
This method should be called before reading the configuration file.
Later, when creating an output model, the model will include the contents of the configuration dictionary
or file.
For example, a local file:
.. code-block:: py
config_file = task.connect_configuration(config_file)
my_params = json.load(open(config_file,'rt'))
A parameter dictionary:
.. code-block:: py
my_params = task.connect_configuration(my_params)
:param configuration: The configuration. This is usually the configuration used in the model training process.
Specify one of the following:
- A dictionary - A dictionary containing the configuration. ClearML stores the configuration in
the **ClearML Server** (backend), in a HOCON format (JSON-like format) which is editable.
- A ``pathlib2.Path`` string - A path to the configuration file. ClearML stores the content of the file.
A local path must be a relative path. When executing a Task remotely in a worker, the contents brought
from the **ClearML Server** (backend) overwrite the contents of the file.
:param str name: Configuration section name. default: 'General'
Allowing users to store multiple configuration dicts/files
:param str description: Configuration section description (text). default: None
:return: If a dictionary is specified, then a dictionary is returned. If a pathlib2.Path / string is
specified, then a path to a local configuration file is returned.
"""
pathlib_Path = None # noqa
if not isinstance(configuration, (dict, Path, six.string_types)):
try:
from pathlib import Path as pathlib_Path # noqa
except ImportError:
pass
if not pathlib_Path or not isinstance(configuration, pathlib_Path):
raise ValueError("connect_configuration supports `dict`, `str` and 'Path' types, "
"{} is not supported".format(type(configuration)))
multi_config_support = Session.check_min_api_version('2.9')
if multi_config_support and not name:
name = self._default_configuration_section_name
if not multi_config_support and name and name != self._default_configuration_section_name:
raise ValueError("Multiple configurations is not supported with the current 'clearml-server', "
"please upgrade to the latest version")
# parameter dictionary
if isinstance(configuration, dict):
def _update_config_dict(task, config_dict):
if multi_config_support:
# noinspection PyProtectedMember
task._set_configuration(
name=name, description=description, config_type='dictionary', config_dict=config_dict)
else:
# noinspection PyProtectedMember
task._set_model_config(config_dict=config_dict)
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
if multi_config_support:
self._set_configuration(
name=name, description=description, config_type='dictionary', config_dict=configuration)
else:
self._set_model_config(config_dict=configuration)
configuration = ProxyDictPostWrite(self, _update_config_dict, **configuration)
else:
# noinspection PyBroadException
try:
remote_configuration = self._get_configuration_dict(name=name) \
if multi_config_support else self._get_model_config_dict()
except Exception:
remote_configuration = None
if remote_configuration is None:
LoggerRoot.get_base_logger().warning(
"Could not retrieve remote configuration named \'{}\'\n"
"Using default configuration: {}".format(name, str(configuration)))
# update back configuration section
if multi_config_support:
self._set_configuration(
name=name, description=description,
config_type='dictionary', config_dict=configuration)
return configuration
configuration.clear()
configuration.update(remote_configuration)
configuration = ProxyDictPreWrite(False, False, **configuration)
return configuration
# it is a path to a local file
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
# check if not absolute path
configuration_path = Path(configuration)
if not configuration_path.is_file():
ValueError("Configuration file does not exist")
try:
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
except Exception:
raise ValueError("Could not connect configuration file {}, file could not be read".format(
configuration_path.as_posix()))
if multi_config_support:
self._set_configuration(
name=name, description=description,
config_type=configuration_path.suffixes[-1].lstrip('.')
if configuration_path.suffixes and configuration_path.suffixes[-1] else 'file',
config_text=configuration_text)
else:
self._set_model_config(config_text=configuration_text)
return configuration
else:
configuration_text = self._get_configuration_text(name=name) if multi_config_support \
else self._get_model_config_text()
if configuration_text is None:
LoggerRoot.get_base_logger().warning(
"Could not retrieve remote configuration named \'{}\'\n"
"Using default configuration: {}".format(name, str(configuration)))
# update back configuration section
if multi_config_support:
configuration_path = Path(configuration)
if configuration_path.is_file():
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
self._set_configuration(
name=name, description=description,
config_type=configuration_path.suffixes[-1].lstrip('.')
if configuration_path.suffixes and configuration_path.suffixes[-1] else 'file',
config_text=configuration_text)
return configuration
configuration_path = Path(configuration)
fd, local_filename = mkstemp(prefix='clearml_task_config_',
suffix=configuration_path.suffixes[-1] if
configuration_path.suffixes else '.txt')
os.write(fd, configuration_text.encode('utf-8'))
os.close(fd)
if pathlib_Path:
return pathlib_Path(local_filename)
return Path(local_filename) if isinstance(configuration, Path) else local_filename
def connect_label_enumeration(self, enumeration):
# type: (Dict[str, int]) -> Dict[str, int]
"""
Connect a label enumeration dictionary to a Task (experiment) object.
Later, when creating an output model, the model will include the label enumeration dictionary.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
:return: The label enumeration dictionary (JSON).
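For example, an illustrative sketch (the label names are placeholders):
.. code-block:: py
# locally this stores the enumeration; remotely it is overridden by the backend values
labels = task.connect_label_enumeration({'background': 0, 'person': 1})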
"""
if not isinstance(enumeration, dict):
raise ValueError("connect_label_enumeration supports only `dict` type, "
"{} is not supported".format(type(enumeration)))
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
self.set_model_label_enumeration(enumeration)
else:
# pop everything
enumeration.clear()
enumeration.update(self.get_labels_enumeration())
return enumeration
def get_logger(self):
# type: () -> Logger
"""
Get a Logger object for reporting, for this task context. You can view all Logger report output associated with
the Task for which this method is called, including metrics, plots, text, tables, and images, in the
**ClearML Web-App (UI)**.
:return: The Logger for the Task (experiment).
"""
return self._get_logger(auto_connect_streams=self._log_to_backend)
def mark_started(self, force=False):
# type: (bool) -> ()
"""
Manually mark a Task as started (happens automatically)
:param bool force: If True the task status will be changed to `started` regardless of the current Task state.
"""
# UI won't let us see metrics if we're not started
self.started(force=force)
self.reload()
def mark_stopped(self, force=False):
# type: (bool) -> ()
"""
Manually mark a Task as stopped (also used in :meth:`_at_exit`)
:param bool force: If True the task status will be changed to `stopped` regardless of the current Task state.
"""
# flush any outstanding logs
self.flush(wait_for_uploads=True)
# mark task as stopped
self.stopped(force=force)
def flush(self, wait_for_uploads=False):
# type: (bool) -> bool
"""
Flush any outstanding reports or console logs.
:param bool wait_for_uploads: Wait for all outstanding uploads to complete
- ``True`` - Wait
- ``False`` - Do not wait (default)
"""
# make sure model upload is done
if BackendModel.get_num_results() > 0 and wait_for_uploads:
BackendModel.wait_for_results()
# flush any outstanding logs
if self._logger:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
if self.__reporter:
self.__reporter.flush()
# if wait_for_uploads:
# self.__reporter.wait_for_events()
LoggerRoot.flush()
return True
def reset(self, set_started_on_success=False, force=False):
# type: (bool, bool) -> None
"""
Reset a Task. ClearML reloads a Task after a successful reset.
When a worker executes a Task remotely, the Task does not reset unless
the ``force`` parameter is set to ``True`` (this avoids accidentally clearing logs and metrics).
:param bool set_started_on_success: If successful, automatically set the Task to `started`
- ``True`` - If successful, set to started.
- ``False`` - If successful, do not set to started. (default)
:param bool force: Force a Task reset, even when executing the Task (experiment) remotely in a worker
- ``True`` - Force
- ``False`` - Do not force (default)
"""
if not running_remotely() or not self.is_main_task() or force:
super(Task, self).reset(set_started_on_success=set_started_on_success)
def close(self):
"""
Close the current Task. Enables you to manually shut down the Task.
.. warning::
Only call :meth:`Task.close` if you are certain the Task is not needed.
"""
if self._at_exit_called:
return
# store is_main before we call at_exit, because we will null it
is_main = self.is_main_task()
is_sub_process = self.__is_subprocess()
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
self.__shutdown()
# unregister atexit callbacks and signal hooks, if we are the main task
if is_main:
self.__register_at_exit(None)
if not is_sub_process:
# make sure we enable multiple Task.init calls with reporting sub-processes
BackgroundMonitor.clear_main_process(self)
# noinspection PyProtectedMember
Logger._remove_std_logger()
def delete(self, delete_artifacts_and_models=True, skip_models_used_by_other_tasks=True, raise_on_error=False):
# type: (bool, bool, bool) -> bool
"""
Delete the task as well as its output models and artifacts.
Models and artifacts are deleted from their storage locations, each using its URI.
Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
are properly configured and that you have delete permission in the related buckets).
:param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True)
:param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
:param raise_on_error: If True an exception will be raised when encountering an error.
If False an error would be printed and no exception will be raised.
:return: True if the task was deleted successfully.
"""
if not running_remotely() or not self.is_main_task():
return super(Task, self)._delete(
delete_artifacts_and_models=delete_artifacts_and_models,
skip_models_used_by_other_tasks=skip_models_used_by_other_tasks,
raise_on_error=raise_on_error,
)
return False
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, pandas.DataFrame, Dict, Union[bool, Sequence[str]]) -> None
"""
Register (add) an artifact for the current Task. Registered artifacts are dynamically synchronized with the
**ClearML Server** (backend). If a registered artifact is updated, the update is stored in the
**ClearML Server** (backend). Registered artifacts are primarily used for Data Auditing.
The currently supported registered artifact object type is a pandas.DataFrame.
See also :meth:`Task.unregister_artifact` and :meth:`Task.get_registered_artifacts`.
.. note::
ClearML also supports uploaded artifacts which are one-time uploads of static artifacts that are not
dynamically synchronized with the **ClearML Server** (backend). These static artifacts include
additional object types. For more information, see :meth:`Task.upload_artifact`.
:param str name: The name of the artifact.
.. warning::
If an artifact with the same name was previously registered, it is overwritten.
:param object artifact: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **ClearML Web-App (UI)**, **ARTIFACTS** tab.
:param uniqueness_columns: A Sequence of columns for artifact uniqueness comparison criteria, or the default
value of ``True``. If ``True``, the artifact uniqueness comparison criteria is all the columns,
which is the same as ``artifact.columns``.
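For example, an illustrative sketch (the DataFrame content and column names are placeholders):
.. code-block:: py
import pandas as pd
df = pd.DataFrame({'id': [1, 2], 'score': [0.5, 0.9]})
# keep the DataFrame synchronized with the ClearML Server, deduplicated by the 'id' column
task.register_artifact(name='scores', artifact=df, uniqueness_columns=['id'])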
"""
if not isinstance(uniqueness_columns, CollectionsSequence) and uniqueness_columns is not True:
raise ValueError('uniqueness_columns should be a List (sequence) or True')
if isinstance(uniqueness_columns, str):
uniqueness_columns = [uniqueness_columns]
self._artifacts_manager.register_artifact(
name=name, artifact=artifact, metadata=metadata, uniqueness_columns=uniqueness_columns)
def unregister_artifact(self, name):
# type: (str) -> None
"""
Unregister (remove) a registered artifact. This removes the artifact from the watch list that ClearML uses
to synchronize artifacts with the **ClearML Server** (backend).
.. important::
- Calling this method does not remove the artifact from a Task. It only stops ClearML from
monitoring the artifact.
- When this method is called, ClearML immediately takes the last snapshot of the artifact.
"""
self._artifacts_manager.unregister_artifact(name=name)
def get_registered_artifacts(self):
# type: () -> Dict[str, Artifact]
"""
Get a dictionary containing the Task's registered (dynamically synchronized) artifacts (name, artifact object).
.. note::
After calling ``get_registered_artifacts``, you can still modify the registered artifacts.
:return: The registered (dynamically synchronized) artifacts.
"""
return self._artifacts_manager.registered_artifacts
def upload_artifact(
self,
name, # type: str
artifact_object, # type: Union[str, Mapping, pandas.DataFrame, numpy.ndarray, Image.Image, Any]
metadata=None, # type: Optional[Mapping]
delete_after_upload=False, # type: bool
auto_pickle=True, # type: bool
preview=None, # type: Any
wait_on_upload=False, # type: bool
):
# type: (...) -> bool
"""
Upload (add) a static artifact to a Task object. The artifact is uploaded in the background.
The currently supported upload (static) artifact types include:
- string / pathlib2.Path - A path to artifact file. If a wildcard or a folder is specified, then ClearML
creates and uploads a ZIP file.
- dict - ClearML stores a dictionary as ``.json`` file and uploads it.
- pandas.DataFrame - ClearML stores a pandas.DataFrame as ``.csv.gz`` (compressed CSV) file and uploads it.
- numpy.ndarray - ClearML stores a numpy.ndarray as ``.npz`` file and uploads it.
- PIL.Image - ClearML stores a PIL.Image as ``.png`` file and uploads it.
- Any - If called with auto_pickle=True, the object will be pickled and uploaded.
:param str name: The artifact name.
.. warning::
If an artifact with the same name was previously uploaded, then it is overwritten.
:param object artifact_object: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **ClearML Web-App (UI)**, **ARTIFACTS** tab.
:param bool delete_after_upload: After the upload, delete the local copy of the artifact
- ``True`` - Delete the local copy of the artifact.
- ``False`` - Do not delete. (default)
:param bool auto_pickle: If True (default) and the artifact_object is not one of the following types:
pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string), local_file (string)
the artifact_object will be pickled and uploaded as pickle file artifact (with file extension .pkl)
:param Any preview: The artifact preview
:param bool wait_on_upload: Whether or not the upload should be synchronous, forcing the upload to complete
before continuing.
:return: The status of the upload.
- ``True`` - Upload succeeded.
- ``False`` - Upload failed.
:raise: If the artifact object type is not supported, raise a ``ValueError``.
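For example, an illustrative sketch (the artifact names and file path are placeholders):
.. code-block:: py
# upload a local file and a dictionary as static artifacts
task.upload_artifact(name='dataset', artifact_object='/tmp/dataset.zip')
task.upload_artifact(name='run config', artifact_object={'epochs': 10}, wait_on_upload=True)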
"""
return self._artifacts_manager.upload_artifact(
name=name, artifact_object=artifact_object, metadata=metadata, delete_after_upload=delete_after_upload,
auto_pickle=auto_pickle, preview=preview, wait_on_upload=wait_on_upload)
def get_models(self):
# type: () -> Mapping[str, Sequence[Model]]
"""
Return a dictionary with {'input': [], 'output': []} loaded/stored models of the current Task
Input models are files loaded in the task, either manually or automatically logged
Output models are files stored in the task, either manually or automatically logged
Automatically logged frameworks are for example: TensorFlow, Keras, PyTorch, ScikitLearn(joblib) etc.
:return: A dictionary-like object with "input"/"output" keys and input/output properties, pointing to a
list-like object containing Model objects. Each list-like object also acts as a dictionary, mapping
model name to an appropriate model instance.
Example:
.. code-block:: py
{'input': [clearml.Model()], 'output': [clearml.Model()]}
"""
return TaskModels(self)
def is_current_task(self):
# type: () -> bool
"""
.. deprecated:: 0.13.0
This method is deprecated. Use :meth:`Task.is_main_task` instead.
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)
:return: Is this Task object the main execution Task
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self.is_main_task()
def is_main_task(self):
# type: () -> bool
"""
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)
.. note::
If :meth:`Task.init` was never called, this method will *not* create
it, making this test more efficient than:
.. code-block:: py
Task.init() == task
:return: Is this Task object the main execution Task
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self is self.__main_task
def set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
self._set_model_config(config_text=config_text, config_dict=config_dict)
def get_model_config_text(self):
# type: () -> str
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_text()
def get_model_config_dict(self):
# type: () -> Dict
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_dict()
def set_model_label_enumeration(self, enumeration=None):
# type: (Optional[Mapping[str, int]]) -> ()
"""
Set the label enumeration for the Task object before creating an output model.
Later, when creating an output model, the model will inherit these properties.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
"""
super(Task, self).set_model_label_enumeration(enumeration=enumeration)
def get_last_iteration(self):
# type: () -> int
"""
Get the last reported iteration, which is the last iteration for which the Task reported a metric.
.. note::
The maximum reported iteration is not in the local cache. This method
sends a request to the **ClearML Server** (backend).
:return: The last reported iteration number.
"""
self._reload_last_iteration()
return max(self.data.last_iteration or 0, self.__reporter.max_iteration if self.__reporter else 0)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set initial iteration, instead of zero. Useful when continuing training from previous checkpoints
:param int offset: Initial iteration (at starting point)
:return: Newly set initial offset.
"""
return super(Task, self).set_initial_iteration(offset=offset)
def get_initial_iteration(self):
# type: () -> int
"""
Return the initial iteration offset, default is 0
Useful when continuing training from previous checkpoints
:return: Initial iteration offset.
"""
return super(Task, self).get_initial_iteration()
def get_last_scalar_metrics(self):
# type: () -> Dict[str, Dict[str, Dict[str, float]]]
"""
Get the last scalar metrics which the Task reported. This is a nested dictionary, ordered by title and series.
For example:
.. code-block:: javascript
{
'title': {
'series': {
'last': 0.5,
'min': 0.1,
'max': 0.9
}
}
}
:return: The last scalar metrics.
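For example, an illustrative sketch (the 'loss' title and 'train' series names are placeholders):
.. code-block:: py
metrics = task.get_last_scalar_metrics()
last_train_loss = metrics.get('loss', {}).get('train', {}).get('last')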
"""
self.reload()
metrics = self.data.last_metrics
scalar_metrics = dict()
for i in metrics.values():
for j in i.values():
scalar_metrics.setdefault(j['metric'], {}).setdefault(
j['variant'], {'last': j['value'], 'min': j['min_value'], 'max': j['max_value']})
return scalar_metrics
def get_parameters_as_dict(self):
# type: () -> Dict
"""
Get the Task parameters as a raw nested dictionary.
.. note::
The values are not parsed. They are returned as is.
"""
return naive_nested_from_flat_dictionary(self.get_parameters())
def set_parameters_as_dict(self, dictionary):
# type: (Dict) -> None
"""
Set the parameters for the Task object from a dictionary. The dictionary can be nested.
This does not link the dictionary to the Task object. It does a one-time update. This
is the same behavior as the :meth:`Task.connect` method.
"""
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
def get_user_properties(self, value_only=False):
# type: (bool) -> Dict[str, Union[str, dict]]
"""
Get user properties for this task.
Returns a dictionary mapping user property name to user property details dict.
:param value_only: If True, returned user property details will be a string representing the property value.
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
return {}
section = "properties"
params = self._hyper_params_manager.get_hyper_params(
sections=[section], projector=attrgetter("value") if value_only else None
)
return dict(params.get(section, {}))
def set_user_properties(
self,
*iterables, # type: Union[Mapping[str, Union[str, dict, None]], Iterable[dict]]
**properties # type: Union[str, dict, int, float, None]
):
# type: (...) -> bool
"""
Set user properties for this task.
A user property can contain the following fields (all of type string):
name / value / description / type
Examples:
task.set_user_properties(backbone='great', stable=True)
task.set_user_properties(backbone={"type": int, "description": "network type", "value": "great"}, )
task.set_user_properties(
{"name": "backbone", "description": "network type", "value": "great"},
{"name": "stable", "description": "is stable", "value": True},
)
:param iterables: Properties iterables, each can be:
* A dictionary of string key (name) to either a string value (value) or a dict (property details). If the value
is a dict, it must contain a "value" field. For example:
.. code-block:: javascript
{
"property_name": {"description": "This is a user property", "value": "property value"},
"another_property_name": {"description": "This is user property", "value": "another value"},
"yet_another_property_name": "some value"
}
* An iterable of dicts (each representing property details). Each dict must contain a "name" field and a
"value" field. For example:
.. code-block:: javascript
[
{
"name": "property_name",
"description": "This is a user property",
"value": "property value"
},
{
"name": "another_property_name",
"description": "This is another user property",
"value": "another value"
}
]
:param properties: Additional properties keyword arguments. Key is the property name, and value can be
a string (property value) or a dict (property details). If the value is a dict, it must contain a "value"
field. For example:
.. code-block:: javascript
{
"property_name": "string as property value",
"another_property_name": {
"type": "string",
"description": "This is user property",
"value": "another value"
}
}
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
return False
return self._hyper_params_manager.edit_hyper_params(
iterables=list(properties.items()) + (
list(iterables.items()) if isinstance(iterables, dict) else list(iterables)),
replace='none',
force_section="properties",
)
def delete_user_properties(self, *iterables):
# type: (Iterable[Union[dict, Iterable[str, str]]]) -> bool
"""
Delete user properties for this task.
:param iterables: Property key iterables. Each is an iterable whose items each represent
a property entry to delete, in one of the following formats:
* A dictionary containing 'section' and 'name' fields
* An iterable (e.g. tuple, list etc.) whose first two items denote 'section' and 'name'
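For example, an illustrative sketch (the property names are placeholders):
.. code-block:: py
task.delete_user_properties(
    {'section': 'properties', 'name': 'backbone'},
    ('properties', 'stable'),
)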
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
return False
return self._hyper_params_manager.delete_hyper_params(*iterables)
def set_base_docker(self, docker_cmd, docker_arguments=None, docker_setup_bash_script=None):
# type: (str, Optional[Union[str, Sequence[str]]], Optional[Union[str, Sequence[str]]]) -> ()
"""
Set the base docker image for this experiment
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely the call is ignored
:param docker_cmd: docker container image (example: 'nvidia/cuda:11.1')
:param docker_arguments: docker execution parameters (example: '-e ENV=1')
:param docker_setup_bash_script: bash script to run at the
beginning of the docker before launching the Task itself. example: ['apt update', 'apt-get install -y gcc']
"""
if not self.running_locally() and self.is_main_task():
return
super(Task, self).set_base_docker(
docker_cmd=docker_cmd,
docker_arguments=docker_arguments,
docker_setup_bash_script=docker_setup_bash_script
)
def set_resource_monitor_iteration_timeout(self, seconds_from_start=1800):
# type: (float) -> bool
"""
Set the ResourceMonitor maximum duration (in seconds) to wait until first scalar/plot is reported.
If timeout is reached without any reporting, the ResourceMonitor will start reporting machine statistics based
on seconds from Task start time (instead of based on iteration)
:param seconds_from_start: Maximum number of seconds to wait for scalar/plot reporting before defaulting
to machine statistics reporting based on seconds from experiment start time
:return: True if success
"""
if not self._resource_monitor:
return False
self._resource_monitor.wait_for_first_iteration = seconds_from_start
self._resource_monitor.max_check_first_iteration = seconds_from_start
return True
def execute_remotely(self, queue_name=None, clone=False, exit_process=True):
# type: (Optional[str], bool, bool) -> Optional[Task]
"""
If task is running locally (i.e., not by ``clearml-agent``), then clone the Task and enqueue it for remote
execution; or, stop the execution of the current Task, reset its state, and enqueue it. If ``exit_process==True``,
*exit* this process.
.. note::
If the task is running remotely (i.e., ``clearml-agent`` is executing it), this call is a no-op
(i.e., does nothing).
:param queue_name: The queue name used for enqueueing the task. If ``None``, this call exits the process
without enqueuing the task.
:param clone: Clone the Task and execute the newly cloned Task
The values are:
- ``True`` - A cloned copy of the Task will be created, and enqueued, instead of this Task.
- ``False`` - The Task will be enqueued.
:param exit_process: The function call will leave the calling process at the end
- ``True`` - Exit the process (exit(0)).
- ``False`` - Do not exit the process.
.. warning::
If ``clone==False``, then ``exit_process`` must be ``True``.
:return Task: The Task object of the newly generated remotely executing Task
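For example, an illustrative sketch (the project, Task and queue names are placeholders):
.. code-block:: py
task = Task.init('examples', 'remote run')
# everything up to this point runs locally; from here on a clearml-agent takes over
task.execute_remotely(queue_name='default', clone=False, exit_process=True)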
"""
# do nothing, we are running remotely
if running_remotely() and self.is_main_task():
return None
if not self.is_main_task():
LoggerRoot.get_base_logger().warning(
"Calling task.execute_remotely is only supported on main Task (created with Task.init)\n"
"Defaulting to self.enqueue(queue_name={})".format(queue_name)
)
if not queue_name:
raise ValueError("queue_name must be provided")
enqueue_task = Task.clone(source_task=self) if clone else self
Task.enqueue(task=enqueue_task, queue_name=queue_name)
return
if not clone and not exit_process:
raise ValueError(
"clone==False and exit_process==False is not supported. "
"Task enqueuing itself must exit the process afterwards.")
# make sure we analyze the process
if self.status in (Task.TaskStatusEnum.in_progress, ):
if clone:
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
self.flush(wait_for_uploads=True)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
else:
# close ourselves (it will make sure the repo is updated)
self.close()
# clone / reset Task
if clone:
task = Task.clone(self)
else:
task = self
# check if the server supports enqueueing aborted/stopped Tasks
if Session.check_min_api_server_version('2.13'):
self.mark_stopped(force=True)
else:
self.reset()
# enqueue ourselves
if queue_name:
Task.enqueue(task, queue_name=queue_name)
LoggerRoot.get_base_logger().warning(
'Switching to remote execution, output log page {}'.format(task.get_output_log_web_page()))
else:
# Remove the development system tag
system_tags = [t for t in task.get_system_tags() if t != self._development_tag]
self.set_system_tags(system_tags)
# leave this process.
if exit_process:
LoggerRoot.get_base_logger().warning('Terminating local execution process')
leave_process(0)
return task
def create_function_task(self, func, func_name=None, task_name=None, **kwargs):
# type: (Callable, Optional[str], Optional[str], **Optional[Any]) -> Optional[Task]
"""
Create a new task, and call ``func`` with the specified kwargs.
One can think of this call as remote forking, where the newly created instance is the new Task
calling the specified func with the appropriate kwargs and exiting once the func terminates.
Notice that a remote executed function cannot create another child remote executed function.
.. note::
- Must be called from the main Task, i.e. the one created by Task.init(...)
- The remote Tasks inherits the environment from the creating Task
- In the remote Task, the entrypoint is the same as the creating Task
- In the remote Task, the execution is the same until reaching this function call
:param func: A function to execute remotely as a single Task.
On the remotely executed Task, the entry-point and the environment are copied from this
calling process; only this function call redirects the execution flow to the called func,
alongside the passed arguments.
:param func_name: A unique identifier of the function. Default: the function name without the namespace.
For example, Class.foo() becomes 'foo'
:param task_name: The newly created Task name. Default: the calling Task name + function name
:param kwargs: name specific arguments for the target function.
These arguments will appear under the configuration, "Function" section
:return Task: Return the newly created Task or None if running remotely and execution is skipped
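For example, an illustrative sketch (the function, its arguments and the names are placeholders):
.. code-block:: py
def eval_model(batch_size, model_path):
    print('evaluating', model_path, 'with batch size', batch_size)
# creates a new Task that will call eval_model(...) with the given keyword arguments
func_task = task.create_function_task(
    func=eval_model, func_name='eval', task_name='eval task',
    batch_size=32, model_path='/tmp/model.pt',
)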
"""
if not self.is_main_task():
raise ValueError("Only the main Task object can call create_function_task()")
if not callable(func):
raise ValueError("func must be callable")
if not Session.check_min_api_version('2.9'):
raise ValueError("Remote function execution is not supported, "
"please upgrade to the latest server version")
func_name = str(func_name or func.__name__).strip()
if func_name in self._remote_functions_generated:
raise ValueError("Function name must be unique, a function by the name '{}' "
"was already created by this Task.".format(func_name))
section_name = 'Function'
tag_name = 'func'
func_marker = '__func_readonly__'
# sanitize the dict, leave only basic types that we might want to override later in the UI
func_params = {k: v for k, v in kwargs.items() if verify_basic_value(v)}
func_params[func_marker] = func_name
# do not query if we are running locally, there is no need.
task_func_marker = self.running_locally() or self.get_parameter('{}/{}'.format(section_name, func_marker))
# if we are running locally, or if we are running remotely but we are not one of the forked tasks
# condition explained:
# (1) running in development mode creates all the forked tasks
# (2) running remotely but this is not one of the forked tasks (i.e. it is missing the fork tag attribute)
if self.running_locally() or not task_func_marker:
self._wait_for_repo_detection(300)
task = self.clone(self, name=task_name or '{} <{}>'.format(self.name, func_name), parent=self.id)
task.set_system_tags((task.get_system_tags() or []) + [tag_name])
task.connect(func_params, name=section_name)
self._remote_functions_generated[func_name] = task.id
return task
# check if we are one of the generated functions and if this is us,
# if we are not the correct function, do nothing and leave
if task_func_marker != func_name:
self._remote_functions_generated[func_name] = len(self._remote_functions_generated) + 1
return
# mark this is us:
self._remote_functions_generated[func_name] = self.id
# this is us for sure, let's update the arguments and call the function
self.connect(func_params, name=section_name)
func_params.pop(func_marker, None)
kwargs.update(func_params)
func(**kwargs)
# This is it, leave the process
leave_process(0)
def wait_for_status(
self,
status=(_Task.TaskStatusEnum.completed, _Task.TaskStatusEnum.stopped, _Task.TaskStatusEnum.closed),
raise_on_status=(_Task.TaskStatusEnum.failed,),
check_interval_sec=60.,
):
# type: (Iterable[Task.TaskStatusEnum], Optional[Iterable[Task.TaskStatusEnum]], float) -> ()
"""
Wait for a task to reach a defined status.
:param status: Status to wait for. Defaults to ('completed', 'stopped', 'closed', )
:param raise_on_status: Raise RuntimeError if the status of the tasks matches one of these values.
Defaults to ('failed').
:param check_interval_sec: Interval in seconds between two checks. Defaults to 60 seconds.
:raise: RuntimeError if the status is one of {raise_on_status}.
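For example, an illustrative sketch (the project and Task names are placeholders):
.. code-block:: py
# block until the other Task finishes, polling every 30 seconds
other_task = Task.get_task(project_name='examples', task_name='training')
other_task.wait_for_status(check_interval_sec=30.)
print(other_task.status)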
"""
stopped_status = list(status) + (list(raise_on_status) if raise_on_status else [])
while self.status not in stopped_status:
time.sleep(check_interval_sec)
if raise_on_status and self.status in raise_on_status:
raise RuntimeError("Task {} has status: {}.".format(self.task_id, self.status))
# make sure we have the Task object
self.reload()
def export_task(self):
# type: () -> dict
"""
Export Task's configuration into a dictionary (for serialization purposes).
A Task can be copied/modified by calling Task.import_task()
Notice: Export task does not include the Task's outputs, such as results
(scalar/plots etc.) or Task artifacts/models
:return: dictionary of the Task's configuration.
"""
self.reload()
export_data = self.data.to_dict()
export_data.pop('last_metrics', None)
export_data.pop('last_iteration', None)
export_data.pop('status_changed', None)
export_data.pop('status_reason', None)
export_data.pop('status_message', None)
export_data.get('execution', {}).pop('artifacts', None)
export_data.get('execution', {}).pop('model', None)
export_data['project_name'] = self.get_project_name()
export_data['session_api_version'] = self.session.api_version
return export_data
def update_task(self, task_data):
# type: (dict) -> bool
"""
Update current task with configuration found on the task_data dictionary.
See also export_task() for retrieving Task configuration.
:param task_data: dictionary with full Task configuration
:return: return True if Task update was successful
"""
return bool(self.import_task(task_data=task_data, target_task=self, update=True))
@classmethod
def import_task(cls, task_data, target_task=None, update=False):
# type: (dict, Optional[Union[str, Task]], bool) -> Optional[Task]
"""
Import (create) Task from previously exported Task configuration (see Task.export_task)
Can also be used to edit/update an existing Task (by passing `target_task` and `update=True`).
:param task_data: dictionary of a Task's configuration
:param target_task: Import task_data into an existing Task. Can be either task_id (str) or Task object.
:param update: If True, merge task_data with current Task configuration.
:return: The imported / updated Task object if successful, None otherwise
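For example, an illustrative sketch that copies an existing Task's configuration into another project
(the project and Task names are placeholders):
.. code-block:: py
source = Task.get_task(project_name='examples', task_name='training')
exported = source.export_task()
exported['project_name'] = 'examples-copies'
new_task = Task.import_task(exported)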
"""
# restore original API version (otherwise, we might not be able to restore the data correctly)
force_api_version = task_data.get('session_api_version') or None
original_api_version = Session.api_version
original_force_max_api_version = Session.force_max_api_version
if force_api_version:
Session.force_max_api_version = str(force_api_version)
if not target_task:
project_name = task_data.get('project_name') or Task._get_project_name(task_data.get('project', ''))
target_task = Task.create(project_name=project_name, task_name=task_data.get('name', None))
elif isinstance(target_task, six.string_types):
target_task = Task.get_task(task_id=target_task)
elif not isinstance(target_task, Task):
raise ValueError(
"`target_task` must be either Task id (str) or Task object, "
"received `target_task` type {}".format(type(target_task)))
target_task.reload()
cur_data = target_task.data.to_dict()
cur_data = merge_dicts(cur_data, task_data) if update else dict(**task_data)
cur_data.pop('id', None)
cur_data.pop('project', None)
# noinspection PyProtectedMember
valid_fields = list(tasks.EditRequest._get_data_props().keys())
cur_data = dict((k, cur_data[k]) for k in valid_fields if k in cur_data)
res = target_task._edit(**cur_data)
if res and res.ok():
target_task.reload()
else:
target_task = None
# restore the current api version, and return a new Task instance with the current version
if force_api_version:
Session.force_max_api_version = original_force_max_api_version
Session.api_version = original_api_version
if target_task:
target_task = Task.get_task(task_id=target_task.id)
return target_task
@classmethod
def import_offline_session(cls, session_folder_zip):
# type: (str) -> (Optional[str])
"""
Upload an offline session (execution) of a Task.
Full Task execution includes repository details, installed packages, artifacts, logs, metric and debug samples.
:param session_folder_zip: Path to a folder containing the session, or zip-file of the session folder.
:return: Newly created task ID (str)
"""
print('ClearML: Importing offline session from {}'.format(session_folder_zip))
temp_folder = None
if Path(session_folder_zip).is_file():
# unzip the file:
temp_folder = mkdtemp(prefix='clearml-offline-')
ZipFile(session_folder_zip).extractall(path=temp_folder)
session_folder_zip = temp_folder
session_folder = Path(session_folder_zip)
if not session_folder.is_dir():
raise ValueError("Could not find the session folder / zip-file {}".format(session_folder))
try:
with open((session_folder / cls._offline_filename).as_posix(), 'rt') as f:
export_data = json.load(f)
except Exception as ex:
raise ValueError(
"Could not read Task object {}: Exception {}".format(session_folder / cls._offline_filename, ex))
task = cls.import_task(export_data)
task.mark_started(force=True)
# fix artifacts
if task.data.execution.artifacts:
from . import StorageManager
# noinspection PyProtectedMember
offline_folder = os.path.join(export_data.get('offline_folder', ''), 'data/')
# noinspection PyProtectedMember
remote_url = task._get_default_report_storage_uri()
if remote_url and remote_url.endswith('/'):
remote_url = remote_url[:-1]
for artifact in task.data.execution.artifacts:
local_path = artifact.uri.replace(offline_folder, '', 1)
local_file = session_folder / 'data' / local_path
if local_file.is_file():
remote_path = local_path.replace(
'.{}{}'.format(export_data['id'], os.sep), '.{}{}'.format(task.id, os.sep), 1)
artifact.uri = '{}/{}'.format(remote_url, remote_path)
StorageManager.upload_file(local_file=local_file.as_posix(), remote_url=artifact.uri)
# noinspection PyProtectedMember
task._edit(execution=task.data.execution)
# logs
TaskHandler.report_offline_session(task, session_folder)
# metrics
Metrics.report_offline_session(task, session_folder)
# print imported results page
print('ClearML results page: {}'.format(task.get_output_log_web_page()))
task.completed()
# close task
task.close()
# cleanup
if temp_folder:
# noinspection PyBroadException
try:
shutil.rmtree(temp_folder)
except Exception:
pass
return task.id
@classmethod
def set_credentials(
cls,
api_host=None,
web_host=None,
files_host=None,
key=None,
secret=None,
store_conf_file=False
):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], bool) -> None
"""
Set new default **ClearML Server** (backend) host and credentials.
These credentials will be overridden by either OS environment variables, or the ClearML configuration
file, ``clearml.conf``.
.. warning::
Credentials must be set before initializing a Task object.
For example, to set credentials for a remote computer:
.. code-block:: py
Task.set_credentials(
api_host='http://localhost:8008', web_host='http://localhost:8080', files_host='http://localhost:8081',
key='optional_credentials', secret='optional_credentials'
)
task = Task.init('project name', 'experiment name')
:param str api_host: The API server url. For example, ``host='http://localhost:8008'``
:param str web_host: The Web server url. For example, ``host='http://localhost:8080'``
:param str files_host: The file server url. For example, ``host='http://localhost:8081'``
:param str key: The user key (in the key/secret pair). For example, ``key='thisisakey123'``
:param str secret: The user secret (in the key/secret pair). For example, ``secret='thisisseceret123'``
:param bool store_conf_file: If True store the current configuration into the ~/clearml.conf file.
If the configuration file exists, no change will be made (outputs a warning).
Not applicable when running remotely (i.e. clearml-agent).
"""
if api_host:
Session.default_host = api_host
if not running_remotely() and not ENV_HOST.get():
ENV_HOST.set(api_host)
if web_host:
Session.default_web = web_host
if not running_remotely() and not ENV_WEB_HOST.get():
ENV_WEB_HOST.set(web_host)
if files_host:
Session.default_files = files_host
if not running_remotely() and not ENV_FILES_HOST.get():
ENV_FILES_HOST.set(files_host)
if key:
Session.default_key = key
if not running_remotely():
ENV_ACCESS_KEY.set(key)
if secret:
Session.default_secret = secret
if not running_remotely():
ENV_SECRET_KEY.set(secret)
if store_conf_file and not running_remotely():
active_conf_file = get_active_config_file()
if active_conf_file:
getLogger().warning(
'Could not store credentials in configuration file, '
'\'{}\' already exists'.format(active_conf_file))
else:
conf = {'api': dict(
api_server=Session.default_host,
web_server=Session.default_web,
files_server=Session.default_files,
credentials=dict(access_key=Session.default_key, secret_key=Session.default_secret))}
with open(get_config_file(), 'wt') as f:
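                    # json.dumps wraps the dict in outer braces; dropping the first and last
                    # lines below writes a brace-less block matching the HOCON-style clearml.conf layout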
lines = json.dumps(conf, indent=4).split('\n')
f.write('\n'.join(lines[1:-1]))
@classmethod
def debug_simulate_remote_task(cls, task_id, reset_task=False):
# type: (str, bool) -> ()
"""
Simulate remote execution of a specified Task.
This call will simulate the behaviour of your Task as if executed by the ClearML-Agent
This means configurations will be coming from the backend server into the code
(the opposite from manual execution, where the backend logs the code arguments)
Use with care.
        :param task_id: Task ID to simulate. Notice that all configuration will be taken from the specified
            Task, regardless of the code's initial values, just as if it were executed by the ClearML agent
        :param reset_task: If True, the target Task is automatically cleared / reset.
"""
# if we are already running remotely, do nothing
if running_remotely():
return
# verify Task ID exists
task = Task.get_task(task_id=task_id)
if not task:
raise ValueError("Task ID '{}' could not be found".format(task_id))
if reset_task:
task.reset(set_started_on_success=False, force=True)
from .config.remote import override_current_task_id
from .config.defs import LOG_TO_BACKEND_ENV_VAR
override_current_task_id(task_id)
LOG_TO_BACKEND_ENV_VAR.set(True)
DEBUG_SIMULATE_REMOTE_TASK.set(True)
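        # A minimal usage sketch (the task ID, project and experiment names below are placeholders):
        #
        #   from clearml import Task
        #   Task.debug_simulate_remote_task(task_id='<existing-task-id>', reset_task=True)
        #   task = Task.init(project_name='examples', task_name='remote debug')
        #   # from here on, configurations/arguments are pulled from the specified Task,
        #   # mimicking execution under clearml-agent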
@classmethod
def _create(cls, project_name=None, task_name=None, task_type=TaskTypes.training):
# type: (Optional[str], Optional[str], Task.TaskTypes) -> Task
"""
Create a new unpopulated Task (experiment).
:param str project_name: The name of the project in which the experiment will be created.
If ``project_name`` is ``None``, and the main execution Task is initialized (see :meth:`Task.init`),
then the main execution Task's project is used. Otherwise, if the project does
not exist, it is created. (Optional)
        :param str task_name: The name of the Task (experiment).
        :param TaskTypes task_type: The task type.
        :return: The newly created Task.
"""
if not project_name:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
        task = cls(
            private=cls.__create_protection,
            project_name=project_name,
            task_name=task_name,
            task_type=task_type,
            log_to_backend=False,
            force_create=True,
        )
        return task
def _set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
Set Task model configuration text/dict
        :param config_text: model configuration (unconstrained text string). Usually the content
            of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# noinspection PyProtectedMember
design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict)
super(Task, self)._set_model_design(design=design)
def _get_model_config_text(self):
# type: () -> str
"""
Get Task model configuration text (before creating an output model)
When an output model is created it will inherit these properties
:return: The model config_text (unconstrained text string).
"""
return super(Task, self).get_model_design()
def _get_model_config_dict(self):
# type: () -> Dict
"""
Get Task model configuration dictionary (before creating an output model)
When an output model is created it will inherit these properties
:return: config_dict: model configuration parameters dictionary.
"""
config_text = self._get_model_config_text()
# noinspection PyProtectedMember
return OutputModel._text_to_config_dict(config_text)
@classmethod
def _reset_current_task_obj(cls):
if not cls.__main_task:
return
task = cls.__main_task
cls.__main_task = None
if task._dev_worker:
task._dev_worker.unregister()
task._dev_worker = None
@classmethod
def _has_current_task_obj(cls):
# type: () -> bool
return bool(cls.__main_task)
@classmethod
def _create_dev_task(
cls, default_project_name, default_task_name, default_task_type, tags,
reuse_last_task_id, continue_last_task=False, detect_repo=True, auto_connect_streams=True
):
if not default_project_name or not default_task_name:
# get project name and task name from repository name and entry_point
result, _ = ScriptInfo.get(create_requirements=False, check_uncommitted=False)
if not default_project_name:
# noinspection PyBroadException
try:
parts = result.script['repository'].split('/')
default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled'
except Exception:
default_project_name = 'Untitled'
if not default_task_name:
# noinspection PyBroadException
try:
default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0]
except Exception:
pass
# conform reuse_last_task_id and continue_last_task
if continue_last_task and isinstance(continue_last_task, str):
reuse_last_task_id = continue_last_task
continue_last_task = True
# if we force no task reuse from os environment
if DEV_TASK_NO_REUSE.get() or not reuse_last_task_id or isinstance(reuse_last_task_id, str):
default_task = None
else:
# if we have a previous session to use, get the task id from it
default_task = cls.__get_last_used_task_id(
default_project_name,
default_task_name,
default_task_type.value,
)
closed_old_task = False
default_task_id = None
task = None
in_dev_mode = not running_remotely()
if in_dev_mode:
if isinstance(reuse_last_task_id, str) and reuse_last_task_id:
default_task_id = reuse_last_task_id
elif not reuse_last_task_id or not cls.__task_is_relevant(default_task):
default_task_id = None
else:
default_task_id = default_task.get('id') if default_task else None
if default_task_id:
try:
task = cls(
private=cls.__create_protection,
task_id=default_task_id,
log_to_backend=True,
)
                # instead of resetting the previously used task we are continuing the training with it.
if task and continue_last_task:
task.reload()
task.mark_started(force=True)
task.set_initial_iteration(task.get_last_iteration()+1)
else:
task_tags = task.data.system_tags if hasattr(task.data, 'system_tags') else task.data.tags
task_artifacts = task.data.execution.artifacts \
if hasattr(task.data.execution, 'artifacts') else None
if ((str(task._status) in (
str(tasks.TaskStatusEnum.published), str(tasks.TaskStatusEnum.closed)))
or task.output_models_id or (cls.archived_tag in task_tags)
or (cls._development_tag not in task_tags)
or task_artifacts):
# If the task is published or closed, we shouldn't reset it so we can't use it in dev mode
# If the task is archived, or already has an output model,
# we shouldn't use it in development mode either
default_task_id = None
task = None
else:
with task._edit_lock:
# from now on, there is no need to reload, we just clear stuff,
# this flag will be cleared off once we actually refresh at the end of the function
task._reload_skip_flag = True
# reset the task, so we can update it
task.reset(set_started_on_success=False, force=False)
# clear the heaviest stuff first
task._clear_task(
system_tags=[cls._development_tag],
comment=make_message('Auto-generated at %(time)s by %(user)s@%(host)s'))
except (Exception, ValueError):
# we failed reusing task, create a new one
default_task_id = None
# create a new task
if not default_task_id:
task = cls(
private=cls.__create_protection,
project_name=default_project_name,
task_name=default_task_name,
task_type=default_task_type,
log_to_backend=True,
)
# no need to reload yet, we clear this before the end of the function
task._reload_skip_flag = True
if in_dev_mode:
# update this session, for later use
cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id)
# set default docker image from env.
task._set_default_docker_image()
# mark us as the main Task, there should only be one dev Task at a time.
if not Task.__main_task:
Task.__main_task = task
# mark the task as started
task.started()
# reload, making sure we are synced
task._reload_skip_flag = False
task.reload()
# add Task tags
if tags:
task.add_tags([tags] if isinstance(tags, str) else tags)
# force update of base logger to this current task (this is the main logger task)
logger = task._get_logger(auto_connect_streams=auto_connect_streams)
if closed_old_task:
logger.report_text('ClearML Task: Closing old development task id={}'.format(default_task.get('id')))
# print warning, reusing/creating a task
if default_task_id and not continue_last_task:
logger.report_text('ClearML Task: overwriting (reusing) task id=%s' % task.id)
elif default_task_id and continue_last_task:
logger.report_text('ClearML Task: continuing previous task id=%s '
'Notice this run will not be reproducible!' % task.id)
else:
logger.report_text('ClearML Task: created new task id=%s' % task.id)
# update current repository and put warning into logs
if detect_repo:
# noinspection PyBroadException
try:
import traceback
stack = traceback.extract_stack(limit=10)
# NOTICE WE ARE ALWAYS 3 down from caller in stack!
for i in range(len(stack)-1, 0, -1):
# look for the Task.init call, then the one above it is the callee module
if stack[i].name == 'init':
task._calling_filename = os.path.abspath(stack[i-1].filename)
break
except Exception:
pass
if in_dev_mode and cls.__detect_repo_async:
task._detect_repo_async_thread = threading.Thread(target=task._update_repository)
task._detect_repo_async_thread.daemon = True
task._detect_repo_async_thread.start()
else:
task._update_repository()
# make sure we see something in the UI
thread = threading.Thread(target=LoggerRoot.flush)
thread.daemon = True
thread.start()
return task
def _get_logger(self, flush_period=NotSet, auto_connect_streams=False):
# type: (Optional[float], Union[bool, dict]) -> Logger
"""
get a logger object for reporting based on the task
:param flush_period: The period of the logger flush.
            If None or any other False value, it will not flush periodically.
If a logger was created before, this will be the new period and
the old one will be discarded.
:return: Logger object
"""
if not self._logger:
# do not recreate logger after task was closed/quit
if self._at_exit_called and self._at_exit_called in (True, get_current_thread_id(), ):
raise ValueError("Cannot use Task Logger after task was closed")
# Get a logger object
self._logger = Logger(
private_task=self,
connect_stdout=(auto_connect_streams is True) or
(isinstance(auto_connect_streams, dict) and auto_connect_streams.get('stdout', False)),
connect_stderr=(auto_connect_streams is True) or
(isinstance(auto_connect_streams, dict) and auto_connect_streams.get('stderr', False)),
connect_logging=isinstance(auto_connect_streams, dict) and auto_connect_streams.get('logging', False),
)
            # make sure we set our reporter to async mode
# we make sure we flush it in self._at_exit
self._reporter.async_enable = True
# if we just created the logger, set default flush period
if not flush_period or flush_period is self.NotSet:
flush_period = DevWorker.report_period
if isinstance(flush_period, (int, float)):
flush_period = int(abs(flush_period))
if flush_period is None or isinstance(flush_period, int):
self._logger.set_flush_period(flush_period)
return self._logger
def _connect_output_model(self, model, name=None):
assert isinstance(model, OutputModel)
model.connect(self, name=name)
return model
def _save_output_model(self, model):
"""
Deprecated: Save a reference to the connected output model.
:param model: The connected output model
"""
# deprecated
self._connected_output_model = model
def _reconnect_output_model(self):
"""
Deprecated: If there is a saved connected output model, connect it again.
This is needed if the input model is connected after the output model
        is connected, and then we will have to get the model design from the
input model by reconnecting.
"""
# Deprecated:
if self._connected_output_model:
self.connect(self._connected_output_model)
def _connect_input_model(self, model, name=None):
assert isinstance(model, InputModel)
# we only allow for an input model to be connected once
# at least until we support multiple input models
# notice that we do not check the task's input model because we allow task reuse and overwrite
# add into comment that we are using this model
comment = self.comment or ''
if not comment.endswith('\n'):
comment += '\n'
comment += 'Using model id: {}'.format(model.id)
self.set_comment(comment)
model.connect(self, name)
return model
def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None, name=None):
# do not allow argparser to connect to jupyter notebook
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython # noqa
ip = get_ipython()
if ip is not None and 'IPKernelApp' in ip.config:
return parser
except Exception:
pass
if self.is_main_task():
argparser_update_currenttask(self)
if (parser is None or parsed_args is None) and argparser_parseargs_called():
            # if we have a parser but not parsed_args, we need to find the parser
if parser and not parsed_args:
for _parser, _parsed_args in get_argparser_last_args():
if _parser == parser:
parsed_args = _parsed_args
break
else:
                # prefer the first argparser (hopefully it is more relevant?!)
for _parser, _parsed_args in get_argparser_last_args():
if parser is None:
parser = _parser
if parsed_args is None and parser == _parser:
parsed_args = _parsed_args
if running_remotely() and (self.is_main_task() or self._is_remote_main_task()):
self._arguments.copy_to_parser(parser, parsed_args)
else:
self._arguments.copy_defaults_from_argparse(
parser, args=args, namespace=namespace, parsed_args=parsed_args)
return parser
def _connect_dictionary(self, dictionary, name=None):
def _update_args_dict(task, config_dict):
# noinspection PyProtectedMember
task._arguments.copy_from_dict(flatten_dictionary(config_dict), prefix=name)
def _refresh_args_dict(task, config_dict):
# reread from task including newly added keys
# noinspection PyProtectedMember
a_flat_dict = task._arguments.copy_to_dict(flatten_dictionary(config_dict), prefix=name)
# noinspection PyProtectedMember
nested_dict = config_dict._to_dict()
config_dict.clear()
config_dict.update(nested_from_flat_dictionary(nested_dict, a_flat_dict))
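        # local (or non-main) run: seed the Task with the user's dict and wrap it so later writes sync back;
        # remote main-task run: pull the (possibly overridden) values from the Task back into the dict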
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
self._arguments.copy_from_dict(flatten_dictionary(dictionary), prefix=name)
dictionary = ProxyDictPostWrite(self, _update_args_dict, **dictionary)
else:
flat_dict = flatten_dictionary(dictionary)
flat_dict = self._arguments.copy_to_dict(flat_dict, prefix=name)
dictionary = nested_from_flat_dictionary(dictionary, flat_dict)
dictionary = ProxyDictPostWrite(self, _refresh_args_dict, **dictionary)
return dictionary
def _connect_task_parameters(self, attr_class, name=None):
if running_remotely() and (self.is_main_task() or self._is_remote_main_task()):
parameters = self.get_parameters()
if not name:
attr_class.update_from_dict(parameters)
else:
attr_class.update_from_dict(
dict((k[len(name)+1:], v) for k, v in parameters.items() if k.startswith('{}/'.format(name))))
else:
self.set_parameters(attr_class.to_dict(), __parameters_prefix=name)
return attr_class
def _connect_object(self, an_object, name=None):
def verify_type(key, value):
if str(key).startswith('_') or not isinstance(value, self._parameters_allowed_types):
return False
# verify everything is json able (i.e. basic types)
try:
json.dumps(value)
return True
except TypeError:
return False
a_dict = {k: v for k, v in an_object.__dict__.items() if verify_type(k, v)}
if running_remotely() and (self.is_main_task() or self._is_remote_main_task()):
a_dict = self._connect_dictionary(a_dict, name)
for k, v in a_dict.items():
if getattr(an_object, k, None) != a_dict[k]:
setattr(an_object, k, v)
return an_object
else:
self._connect_dictionary(a_dict, name)
return an_object
def _validate(self, check_output_dest_credentials=False):
if running_remotely():
super(Task, self)._validate(check_output_dest_credentials=False)
def _dev_mode_stop_task(self, stop_reason, pid=None):
# make sure we do not get called (by a daemon thread) after at_exit
if self._at_exit_called:
return
self.log.warning(
"### TASK STOPPED - USER ABORTED - {} ###".format(
stop_reason.upper().replace('_', ' ')
)
)
self.flush(wait_for_uploads=True)
self.stopped()
if self._dev_worker:
self._dev_worker.unregister()
# NOTICE! This will end the entire execution tree!
if self.__exit_hook:
self.__exit_hook.remote_user_aborted = True
self._kill_all_child_processes(send_kill=False, pid=pid, allow_kill_calling_pid=False)
time.sleep(2.0)
self._kill_all_child_processes(send_kill=True, pid=pid, allow_kill_calling_pid=True)
os._exit(1) # noqa
@staticmethod
def _kill_all_child_processes(send_kill=False, pid=None, allow_kill_calling_pid=True):
# get current process if pid not provided
current_pid = os.getpid()
kill_ourselves = None
pid = pid or current_pid
try:
parent = psutil.Process(pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
# kill ourselves last (if we need to)
if child.pid == current_pid:
kill_ourselves = child
continue
if send_kill:
child.kill()
else:
child.terminate()
# parent ourselves
if allow_kill_calling_pid or parent.pid != current_pid:
if send_kill:
parent.kill()
else:
parent.terminate()
# kill ourselves if we need to:
if allow_kill_calling_pid and kill_ourselves:
if send_kill:
kill_ourselves.kill()
else:
kill_ourselves.terminate()
def _dev_mode_setup_worker(self):
if running_remotely() or not self.is_main_task() or self._at_exit_called or self._offline_mode:
return
if self._dev_worker:
return self._dev_worker
self._dev_worker = DevWorker()
self._dev_worker.register(self)
logger = self.get_logger()
flush_period = logger.get_flush_period()
if not flush_period or flush_period > self._dev_worker.report_period:
logger.set_flush_period(self._dev_worker.report_period)
def _wait_for_repo_detection(self, timeout=None):
# wait for detection repo sync
if not self._detect_repo_async_thread:
return
with self._repo_detect_lock:
if not self._detect_repo_async_thread:
return
# noinspection PyBroadException
try:
if self._detect_repo_async_thread.is_alive():
# if negative timeout, just kill the thread:
if timeout is not None and timeout < 0:
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Waiting for repository detection and full package requirement analysis')
self._detect_repo_async_thread.join(timeout=timeout)
# because join has no return value
if self._detect_repo_async_thread.is_alive():
self.log.info('Repository and package analysis timed out ({} sec), '
'giving up'.format(timeout))
# done waiting, kill the thread
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Finished repository detection and package analysis')
self._detect_repo_async_thread = None
except Exception:
pass
def _summary_artifacts(self):
# signal artifacts upload, and stop daemon
self._artifacts_manager.stop(wait=True)
# print artifacts summary (if not empty)
if self._artifacts_manager.summary:
self.get_logger().report_text(self._artifacts_manager.summary)
def _at_exit(self):
# protect sub-process at_exit (should never happen)
if self._at_exit_called and self._at_exit_called != get_current_thread_id():
return
# shutdown will clear the main, so we have to store it before.
# is_main = self.is_main_task()
self.__shutdown()
# In rare cases we might need to forcefully shutdown the process, currently we should avoid it.
# if is_main:
# # we have to forcefully shutdown if we have forked processes, sometimes they will get stuck
# os._exit(self.__exit_hook.exit_code if self.__exit_hook and self.__exit_hook.exit_code else 0)
def __shutdown(self):
"""
        Will happen automatically once we exit the code, i.e. atexit
:return:
"""
# protect sub-process at_exit
if self._at_exit_called:
# if we are called twice (signal in the middle of the shutdown),
# make sure we flush stdout, this is the best we can do.
if self._at_exit_called == get_current_thread_id() and self._logger and self.__is_subprocess():
self._logger.set_flush_period(None)
# noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=True)
self._at_exit_called = True
return
# from here only a single thread can re-enter
self._at_exit_called = get_current_thread_id()
# disable lock on signal callbacks, to avoid deadlocks.
if self.__exit_hook and self.__exit_hook.signal is not None:
self.__edit_lock = False
is_sub_process = self.__is_subprocess()
# noinspection PyBroadException
try:
wait_for_uploads = True
# first thing mark task as stopped, so we will not end up with "running" on lost tasks
# if we are running remotely, the daemon will take care of it
task_status = None
wait_for_std_log = True
if (not running_remotely() or DEBUG_SIMULATE_REMOTE_TASK.get()) \
and self.is_main_task() and not is_sub_process:
                # check if we crashed, or the signal is not interrupt (manual break)
task_status = ('stopped', )
if self.__exit_hook:
is_exception = self.__exit_hook.exception
# check if we are running inside a debugger
if not is_exception and sys.modules.get('pydevd'):
# noinspection PyBroadException
try:
is_exception = sys.last_type
except Exception:
pass
# only if we have an exception (and not ctrl-break) or signal is not SIGTERM / SIGINT
if (is_exception and not isinstance(is_exception, KeyboardInterrupt)
and is_exception != KeyboardInterrupt) \
or (not self.__exit_hook.remote_user_aborted and
self.__exit_hook.signal not in (None, 2, 15)):
task_status = (
'failed',
'Exception {}'.format(is_exception) if is_exception else
'Signal {}'.format(self.__exit_hook.signal))
wait_for_uploads = False
else:
wait_for_uploads = (self.__exit_hook.remote_user_aborted or self.__exit_hook.signal is None)
if not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal is None and \
not is_exception:
task_status = ('completed', )
else:
task_status = ('stopped', )
# user aborted. do not bother flushing the stdout logs
wait_for_std_log = self.__exit_hook.signal is not None
# wait for repository detection (if we didn't crash)
if wait_for_uploads and self._logger:
# we should print summary here
self._summary_artifacts()
# make sure that if we crashed the thread we are not waiting forever
if not is_sub_process:
self._wait_for_repo_detection(timeout=10.)
# kill the repo thread (negative timeout, do not wait), if it hasn't finished yet.
if not is_sub_process:
self._wait_for_repo_detection(timeout=-1)
# wait for uploads
print_done_waiting = False
if wait_for_uploads and (BackendModel.get_num_results() > 0 or
(self.__reporter and self.__reporter.events_waiting())):
self.log.info('Waiting to finish uploads')
print_done_waiting = True
# from here, do not send log in background thread
if wait_for_uploads:
self.flush(wait_for_uploads=True)
# wait until the reporter flush everything
if self.__reporter:
self.__reporter.stop()
if self.is_main_task():
# notice: this will close the reporting for all the Tasks in the system
Metrics.close_async_threads()
# notice: this will close the jupyter monitoring
ScriptInfo.close()
if self.is_main_task():
# noinspection PyBroadException
try:
from .storage.helper import StorageHelper
StorageHelper.close_async_threads()
except Exception:
pass
if print_done_waiting:
self.log.info('Finished uploading')
# elif self._logger:
# # noinspection PyProtectedMember
# self._logger._flush_stdout_handler()
# from here, do not check worker status
if self._dev_worker:
self._dev_worker.unregister()
self._dev_worker = None
# stop resource monitoring
if self._resource_monitor:
self._resource_monitor.stop()
self._resource_monitor = None
if self._logger:
self._logger.set_flush_period(None)
# noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=wait_for_uploads or wait_for_std_log)
if not is_sub_process:
# change task status
if not task_status:
pass
elif task_status[0] == 'failed':
self.mark_failed(status_reason=task_status[1])
elif task_status[0] == 'completed':
self.completed()
elif task_status[0] == 'stopped':
self.stopped()
# this is so in theory we can close a main task and start a new one
if self.is_main_task():
Task.__main_task = None
except Exception:
# make sure we do not interrupt the exit process
pass
# make sure we store last task state
if self._offline_mode and not is_sub_process:
# noinspection PyBroadException
try:
# create zip file
offline_folder = self.get_offline_mode_folder()
zip_file = offline_folder.as_posix() + '.zip'
with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
for filename in offline_folder.rglob('*'):
if filename.is_file():
relative_file_name = filename.relative_to(offline_folder).as_posix()
zf.write(filename.as_posix(), arcname=relative_file_name)
print('ClearML Task: Offline session stored in {}'.format(zip_file))
except Exception:
pass
# delete locking object (lock file)
if self._edit_lock:
# noinspection PyBroadException
try:
del self.__edit_lock
except Exception:
pass
self._edit_lock = None
# make sure no one will re-enter the shutdown method
self._at_exit_called = True
BackgroundMonitor.wait_for_sub_process(self)
@classmethod
def __register_at_exit(cls, exit_callback, only_remove_signal_and_exception_hooks=False):
class ExitHooks(object):
_orig_exit = None
_orig_exc_handler = None
remote_user_aborted = False
def __init__(self, callback):
self.exit_code = None
self.exception = None
self.signal = None
self._exit_callback = callback
self._org_handlers = {}
self._signal_recursion_protection_flag = False
self._except_recursion_protection_flag = False
def update_callback(self, callback):
if self._exit_callback and not six.PY2:
# noinspection PyBroadException
try:
atexit.unregister(self._exit_callback)
except Exception:
pass
self._exit_callback = callback
if callback:
self.hook()
else:
                    # unregister the exception and signal hooks
if self._orig_exc_handler:
sys.excepthook = self._orig_exc_handler
self._orig_exc_handler = None
for h in self._org_handlers:
# noinspection PyBroadException
try:
signal.signal(h, self._org_handlers[h])
except Exception:
pass
self._org_handlers = {}
def hook(self):
if self._orig_exit is None:
self._orig_exit = sys.exit
sys.exit = self.exit
if self._orig_exc_handler is None:
self._orig_exc_handler = sys.excepthook
sys.excepthook = self.exc_handler
if self._exit_callback:
atexit.register(self._exit_callback)
# TODO: check if sub-process hooks are safe enough, for the time being allow it
if not self._org_handlers: # ## and not Task._Task__is_subprocess():
if sys.platform == 'win32':
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE]
else:
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE, signal.SIGQUIT]
for c in catch_signals:
# noinspection PyBroadException
try:
self._org_handlers[c] = signal.getsignal(c)
signal.signal(c, self.signal_handler)
except Exception:
pass
def exit(self, code=0):
self.exit_code = code
self._orig_exit(code)
def exc_handler(self, exctype, value, traceback, *args, **kwargs):
if self._except_recursion_protection_flag:
# noinspection PyArgumentList
return sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = True
self.exception = value
if self._orig_exc_handler:
# noinspection PyArgumentList
ret = self._orig_exc_handler(exctype, value, traceback, *args, **kwargs)
else:
# noinspection PyNoneFunctionAssignment, PyArgumentList
ret = sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = False
return ret
def signal_handler(self, sig, frame):
self.signal = sig
org_handler = self._org_handlers.get(sig)
signal.signal(sig, org_handler or signal.SIG_DFL)
                # if this is an interrupt (SIGINT), we wait until __at_exit is called (basically do nothing)
if sig == signal.SIGINT:
# return original handler result
return org_handler if not callable(org_handler) else org_handler(sig, frame)
if self._signal_recursion_protection_flag:
# call original
os.kill(os.getpid(), sig)
return org_handler if not callable(org_handler) else org_handler(sig, frame)
self._signal_recursion_protection_flag = True
# call exit callback
if self._exit_callback:
# noinspection PyBroadException
try:
self._exit_callback()
except Exception:
pass
# remove stdout logger, just in case
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
Logger._remove_std_logger()
except Exception:
pass
# noinspection PyUnresolvedReferences
os.kill(os.getpid(), sig)
self._signal_recursion_protection_flag = False
# return handler result
return org_handler if not callable(org_handler) else org_handler(sig, frame)
# we only remove the signals since this will hang subprocesses
if only_remove_signal_and_exception_hooks:
if not cls.__exit_hook:
return
if cls.__exit_hook._orig_exc_handler:
sys.excepthook = cls.__exit_hook._orig_exc_handler
cls.__exit_hook._orig_exc_handler = None
for s in cls.__exit_hook._org_handlers:
# noinspection PyBroadException
try:
signal.signal(s, cls.__exit_hook._org_handlers[s])
except Exception:
pass
cls.__exit_hook._org_handlers = {}
return
if cls.__exit_hook is None:
# noinspection PyBroadException
try:
cls.__exit_hook = ExitHooks(exit_callback)
cls.__exit_hook.hook()
except Exception:
cls.__exit_hook = None
else:
cls.__exit_hook.update_callback(exit_callback)
@classmethod
def _remove_at_exit_callbacks(cls):
cls.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
@classmethod
def __get_task(cls, task_id=None, project_name=None, task_name=None, include_archived=True, task_filter=None):
if task_id:
return cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
task_filter = task_filter or {}
if not include_archived:
task_filter['system_tags'] = ['-{}'.format(cls.archived_tag)]
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
project=[project.id] if project else None,
name=exact_match_regex(task_name) if task_name else None,
only_fields=['id', 'name', 'last_update', system_tags],
**task_filter
)
)
res_tasks = res.response.tasks
# if we have more than one result, filter out the 'archived' results
# notice that if we only have one result we do get the archived one as well.
if len(res_tasks) > 1:
filtered_tasks = [t for t in res_tasks if not getattr(t, system_tags, None) or
cls.archived_tag not in getattr(t, system_tags, None)]
# if we did not filter everything (otherwise we have only archived tasks, so we return them)
if filtered_tasks:
res_tasks = filtered_tasks
task = get_single_result(entity='task', query=task_name, results=res_tasks, raise_on_error=False)
if not task:
return None
return cls(
private=cls.__create_protection,
task_id=task.id,
log_to_backend=False,
)
@classmethod
def __get_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if task_ids:
if isinstance(task_ids, six.string_types):
task_ids = [task_ids]
return [cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
for task_id in task_ids]
return [cls(private=cls.__create_protection, task_id=task.id, log_to_backend=False)
for task in cls._query_tasks(project_name=project_name, task_name=task_name, **kwargs)]
@classmethod
def _query_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if not task_ids:
task_ids = None
elif isinstance(task_ids, six.string_types):
task_ids = [task_ids]
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
only_fields = ['id', 'name', 'last_update', system_tags]
if kwargs and kwargs.get('only_fields'):
only_fields = list(set(kwargs.pop('only_fields')) | set(only_fields))
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
id=task_ids,
project=[project.id] if project else kwargs.pop('project', None),
name=task_name if task_name else None,
only_fields=only_fields,
**kwargs
)
)
return res.response.tasks
@classmethod
def __get_hash_key(cls, *args):
def normalize(x):
return "<{}>".format(x) if x is not None else ""
return ":".join(map(normalize, args))
@classmethod
def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
# check if we have a cached task_id we can reuse
# it must be from within the last 24h and with the same project/name/type
task_sessions = SessionCache.load_dict(str(cls))
task_data = task_sessions.get(hash_key)
if task_data is None:
return None
try:
task_data['type'] = cls.TaskTypes(task_data['type'])
except (ValueError, KeyError):
            LoggerRoot.get_base_logger().warning(
                "Corrupted session cache entry: {}. "
                "Unsupported task type: {}. "
                "Creating a new task.".format(hash_key, task_data['type']),
)
return None
return task_data
@classmethod
def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
task_id = str(task_id)
# update task session cache
task_sessions = SessionCache.load_dict(str(cls))
last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name,
'type': default_task_type, 'id': task_id}
# remove stale sessions
for k in list(task_sessions.keys()):
if ((time.time() - task_sessions[k].get('time', 0)) >
60 * 60 * cls.__task_id_reuse_time_window_in_hours):
task_sessions.pop(k)
# update current session
task_sessions[hash_key] = last_task_session
# store
SessionCache.store_dict(str(cls), task_sessions)
@classmethod
def __task_timed_out(cls, task_data):
return \
task_data and \
task_data.get('id') and \
task_data.get('time') and \
(time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours)
@classmethod
def __get_task_api_obj(cls, task_id, only_fields=None):
if not task_id or cls._offline_mode:
return None
all_tasks = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(id=[task_id], only_fields=only_fields),
).response.tasks
        # The task may not exist, e.g. after environment or server changes
if not all_tasks:
return None
return all_tasks[0]
@classmethod
def __task_is_relevant(cls, task_data):
"""
Check that a cached task is relevant for reuse.
A task is relevant for reuse if:
        1. It is not timed out, i.e. it was last used in the previous 24 hours.
        2. Its name, project and type match the data in the server, so as not
           to override user changes made by using the UI.
:param task_data: A mapping from 'id', 'name', 'project', 'type' keys
to the task's values, as saved in the cache.
:return: True, if the task is relevant for reuse. False, if not.
"""
if not task_data:
return False
if cls.__task_timed_out(task_data):
return False
task_id = task_data.get('id')
if not task_id:
return False
# noinspection PyBroadException
try:
task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type'))
except Exception:
task = None
if task is None:
return False
project_name = None
if task.project:
# noinspection PyBroadException
try:
project = cls._send(
cls._get_default_session(),
projects.GetByIdRequest(project=task.project)
).response.project
if project:
project_name = project.name
except Exception:
pass
if task_data.get('type') and \
task_data.get('type') not in (cls.TaskTypes.training, cls.TaskTypes.testing) and \
not Session.check_min_api_version(2.8):
print('WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
'please upgrade clearml-server.'.format(cls.TaskTypes.training, task_data['type'].value))
task_data['type'] = cls.TaskTypes.training
compares = (
(task.name, 'name'),
(project_name, 'project'),
(task.type, 'type'),
)
# compare after casting to string to avoid enum instance issues
# remember we might have replaced the api version by now, so enums are different
return all(six.text_type(server_data) == six.text_type(task_data.get(task_data_key))
for server_data, task_data_key in compares)
@classmethod
def __close_timed_out_task(cls, task_data):
if not task_data:
return False
task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status'))
if task is None:
return False
stopped_statuses = (
str(tasks.TaskStatusEnum.stopped),
str(tasks.TaskStatusEnum.published),
str(tasks.TaskStatusEnum.publishing),
str(tasks.TaskStatusEnum.closed),
str(tasks.TaskStatusEnum.failed),
str(tasks.TaskStatusEnum.completed),
)
if str(task.status) not in stopped_statuses:
cls._send(
cls._get_default_session(),
tasks.StoppedRequest(
task=task.id,
force=True,
status_message="Stopped timed out development task"
),
)
return True
return False
|
cifar10_main.py
|
import argparse
import datetime
import getpass
import logging
import os
import shutil
import threading
import time
from urllib import parse
import tensorflow as tf
import tensorflow.keras.backend as ktf
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.training import TrainSpec, EvalSpec, train_and_evaluate
import cifar10_data
import cifar10_model_cnn
import cifar10_model_resnet
from utils import ExamplesPerSecondHook
print(tf.__version__)
tf.logging.set_verbosity(tf.logging.DEBUG)
# start = time.time()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
# config.log_device_placement = True # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
ktf.set_session(sess) # set this TensorFlow session as the default session for Keras
shapes = (32, 32, 3), 10
input_name = 'conv2d_input'
# tf.enable_eager_execution()
model_dir_hdfs = False
is_training = False
log = logging.getLogger('tensorflow')
def main(mname, model_dir, batch_size, epochs, eval_steps, eps_log_steps):
global model_dir_hdfs
if model_dir.startswith('hdfs'):
model_dir_hdfs = True
tf.logging.set_verbosity(tf.logging.DEBUG)
# get TF logger
log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create file handler which logs even debug messages
if model_dir_hdfs is False:
if os.path.exists(model_dir) is False:
os.makedirs(model_dir)
log_dir = model_dir
else:
model_dir = os.path.join(model_dir, "job_cifar10_" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
log_dir = '.'
# clear old log files
with open(log_dir + '/tensorflow.log', 'w'):
pass
with open(log_dir + '/gpu.csv', 'w'):
pass
with open(log_dir + '/cpu.csv', 'w'):
pass
fh = logging.FileHandler(log_dir + '/tensorflow.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)
log.info("TF version: %s", tf.__version__)
log.info("Model directory: %s", model_dir)
log.info("Batch size: %s", batch_size)
    log.info("Prefetch all data to memory: %s", True)
log.info("Train epochs: %s", epochs)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = True # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
ktf.set_session(sess) # set this TensorFlow session as the default session for Keras
steps_per_epoch = cifar10_data.train_len() / batch_size
log.info("Steps per epoch: %s", steps_per_epoch)
if eval_steps is None:
eval_steps = steps_per_epoch
log.info("Evaluating each %i steps", eval_steps)
if mname == "cnn":
model = cifar10_model_cnn.cifar_model()
else:
model = cifar10_model_resnet.cifar_model()
global input_name
input_name = 'input_1'
model.summary()
def train_input_fn():
dataset = tf.data.Dataset.from_generator(generator=cifar10_data.generator_train,
output_types=(tf.float32, tf.float32),
output_shapes=shapes)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=batch_size)
# dataset = dataset.repeat(20)
iterator = dataset.make_one_shot_iterator()
features_tensors, labels = iterator.get_next()
features = {input_name: features_tensors}
return features, labels
def eval_input_fn():
dataset = tf.data.Dataset.from_generator(generator=cifar10_data.generator_test,
output_types=(tf.float32, tf.float32),
output_shapes=shapes)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=batch_size)
iterator = dataset.make_one_shot_iterator()
features_tensors, labels = iterator.get_next()
features = {input_name: features_tensors}
return features, labels
my_config = RunConfig(
save_checkpoints_steps=eval_steps # Save checkpoints every n steps and run the evaluation.
# keep_checkpoint_max = 5 # Retain the n most recent checkpoints (default 5).
)
estimator = tf.keras.estimator.model_to_estimator(model, config=my_config, model_dir=model_dir)
examples_sec_hook = ExamplesPerSecondHook(batch_size, every_n_steps=eps_log_steps)
# stopping_hook = early_stopping.stop_if_higher_hook(estimator, "accuracy", 0.5)
train_hooks = [examples_sec_hook]
train_spec = TrainSpec(input_fn=train_input_fn, hooks=train_hooks,
max_steps=cifar10_data.train_len() / batch_size * epochs)
eval_spec = EvalSpec(input_fn=eval_input_fn, steps=cifar10_data.val_len() / batch_size,
throttle_secs=5) # default 100 steps
global is_training
is_training = True
threading.Thread(target=lambda: collect_stats(log_dir)).start()
start = time.time()
train_and_evaluate(estimator, train_spec, eval_spec)
elapsed = time.time() - start
is_training = False
log.info("total time taken (seconds): %s ", elapsed)
if model_dir_hdfs:
parse_res = parse.urlsplit(model_dir)
netloc = parse_res[1]
path = parse_res[2]
webhdfs_model_dir = 'http://' + netloc + ':50070/webhdfs/v1' + path
username = getpass.getuser()
component_name = estimator.config.task_type + str(estimator.config.task_id)
log.info("Uploading log files for %s as %s to HDFS path: %s", component_name, username, webhdfs_model_dir)
logging.shutdown()
os.system('curl -L -i -T tensorflow.log "' + webhdfs_model_dir +
'/tensorflow-' + component_name + '.log?op=CREATE&overwrite=false&user.name=' + username + '"')
os.system('curl -L -i -T cpu.csv "' + webhdfs_model_dir +
'/cpu-' + component_name + '.csv?op=CREATE&overwrite=false&user.name=' + username + '"')
os.system('curl -L -i -T gpu.csv "' + webhdfs_model_dir +
'/gpu-' + component_name + '.csv?op=CREATE&overwrite=false&user.name=' + username + '"')
else:
log.info("Creating zip archive of job results")
logging.shutdown()
shutil.make_archive(model_dir, 'zip', model_dir)
def collect_stats(log_dir):
log.info("Starting statistic collector")
gpu_cmd = "echo $(date '+%Y-%m-%d %H:%M:%S'), $(nvidia-smi --format=csv,noheader " \
"--query-gpu=power.draw,utilization.gpu,temperature.gpu) >> " + log_dir + "/gpu.csv"
cpu_cmd = "echo $(date '+%Y-%m-%d %H:%M:%S'), $(ps -p " + str(os.getpid()) + \
" -o %cpu,%mem --noheaders) >> " + log_dir + "/cpu.csv"
while is_training:
os.system(gpu_cmd)
os.system(cpu_cmd)
time.sleep(2)
log.info("Finishing statistic collector")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--mname',
type=str,
default="cnn",
help='Model to use, cnn or resnet.')
parser.add_argument(
'--batch_size',
type=int,
default=64,
help='Batch size.')
parser.add_argument(
'--epochs',
type=int,
default=100,
help='Number of epochs to train.')
parser.add_argument(
'--eval_steps',
type=int,
default=None,
help='Run the evaluation every n steps.')
parser.add_argument(
'--eps_log_steps',
type=int,
default=50,
help='Log examples per second every n steps.')
parser.add_argument(
'--model_dir',
type=str,
        default="job_cifar10_" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'),
# default = "hdfs://l01abdpmn008.cnbdp.bmwgroup.net/user/amila/cifar10/model_dir"
help='The directory where the checkpoint and summaries are stored.')
args = parser.parse_args()
main(**vars(args))
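    # Example invocation (hypothetical values, shown for illustration only):
    #   python cifar10_main.py --mname resnet --batch_size 128 --epochs 10 \
    #       --eval_steps 500 --model_dir ./job_cifar10_example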
|
bucketcapture.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 20:46:25 2017
@author: mtkes
"""
## NOTE: OpenCV interface to camera controls is sketchy
## use v4l2-ctl directly for explicit control
## example for dark picture: v4l2-ctl -c exposure_auto=1 -c exposure_absolute=10
# import the necessary packages
import cv2
from subprocess import call
from threading import Lock
from threading import Thread
from threading import Condition
import platform
import datetime
import numpy as np
import subprocess
# import our classes
from framerate import FrameRate
from frameduration import FrameDuration
class BucketCapture:
def __init__(self,name,src,width,height,exposure,set_fps=30):
# Default fps to 30
print("Creating BucketCapture for " + name)
self._lock = Lock()
self._condition = Condition()
self.fps = FrameRate()
self.set_fps = set_fps
self.duration = FrameDuration()
self.name = name
self.exposure = exposure
self.iso = 800
self.brightness = 1
self.src = src
self.width = width
self.height = height
# initialize the variable used to indicate if the thread should
# be stopped
self._stop = False
self.stopped = True
self.grabbed = False
self.frame = None
self.timestamp = "timestamp_goes_here"
self.outFrame = None
self.count = 0
self.outCount = self.count
print("BucketCapture created for " + self.name)
def start(self):
# start the thread to read frames from the video stream
print("STARTING BucketCapture for " + self.name)
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
print("BucketCapture for " + self.name + " RUNNING")
# keep looping infinitely until the thread is stopped
self.stopped = False
self.fps.start()
lastExposure = self.exposure
if platform.system() == "Linux":
cmd = ['v4l2-ctl', '--device='+str(self.src),'--list-formats-ext']
returned_output = subprocess.check_output(cmd)
print(returned_output.decode("utf-8"))
cmd = ['v4l2-ctl', '--list-ctrls']
returned_output = subprocess.check_output(cmd)
print(returned_output.decode("utf-8"))
self.camera = cv2.VideoCapture(self.src,apiPreference=cv2.CAP_ANY)
# OpenCV VideoCapture properties that can be set()
# CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
# CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
# CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file: 0 - start of the film, 1 - end of the film.
# CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
# CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
# CV_CAP_PROP_FPS Frame rate.
# CV_CAP_PROP_FOURCC 4-character code of codec.
# CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
# CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
# CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
# CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
# CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
# CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
# CV_CAP_PROP_HUE Hue of the image (only for cameras).
# CV_CAP_PROP_GAIN Gain of the image (only for cameras).
# CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
# CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
# CV_CAP_PROP_WHITE_BALANCE_U The U value of the whitebalance setting (note: only supported by DC1394 v 2.x backend currently)
# CV_CAP_PROP_WHITE_BALANCE_V The V value of the whitebalance setting (note: only supported by DC1394 v 2.x backend currently)
# CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
# CV_CAP_PROP_ISO_SPEED The ISO speed of the camera (note: only supported by DC1394 v 2.x backend currently)
# CV_CAP_PROP_BUFFERSIZE Amount of frames stored in internal buffer memory (note: only supported by DC1394 v 2.x backend currently)
print("SETTINGS: ",self.camera.get(cv2.CAP_PROP_SETTINGS))
print("FORMAT: ",self.camera.get(cv2.CAP_PROP_FORMAT))
print("MODE:", self.camera.get(cv2.CAP_PROP_MODE))
print("CHANNEL:", self.camera.get(cv2.CAP_PROP_CHANNEL))
print("AUTOFOCUS:", self.camera.get(cv2.CAP_PROP_AUTOFOCUS))
print("AUTOEXP:", self.camera.get(cv2.CAP_PROP_AUTO_EXPOSURE))
print("PIXFMT:",self.camera.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))
if platform.system() == "Linux":
cmd = ['v4l2-ctl', '-V']
returned_output = subprocess.check_output(cmd)
print(returned_output.decode("utf-8"))
# print("----------------------")
# self.camera.set(cv2.CAP_PROP_CHANNEL,1)
# self.camera.set(cv2.CAP_PROP_AUTOFOCUS, 1)
self.camera.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)
# print("CHANNEL:", self.camera.get(cv2.CAP_PROP_CHANNEL))
# print("AUTOFOCUS:", self.camera.get(cv2.CAP_PROP_AUTOFOCUS))
# print("AUTOEXP:", self.camera.get(cv2.CAP_PROP_AUTO_EXPOSURE))
self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
print(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH), self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
# cmd = ['v4l2-ctl', '--set-fmt-video=pixelformat=MJPG']
# returned_output = subprocess.check_output(cmd)
# print(returned_output.decode("utf-8"))
if platform.system() == "Linux":
cmd = ['v4l2-ctl', '-V']
returned_output = subprocess.check_output(cmd)
print(returned_output.decode("utf-8"))
print(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH), self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
# self.camera.setPixelFormat(VideoMode.PixelFormat.kYUYV)
self.camera.set(cv2.CAP_PROP_FPS, self.set_fps)
# self.camera.setExposureManual(self.exposure)
self.camera.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
# self.camera.setBrightness(1)
self.camera.set(cv2.CAP_PROP_BRIGHTNESS, self.brightness)
# p = self.camera.enumerateVideoModes()
# for pi in p:
# print(pi.fps, pi.height, pi.width, pi.pixelFormat)
count = 0
while True:
# if the thread indicator variable is set, stop the thread
if (self._stop == True):
self._stop = False
self.stopped = True
return
if (lastExposure != self.exposure):
self.setExposure()
lastExposure = self.exposure
# Tell the CvSink to grab a frame from the camera and put it
# in the source image. If there is an error notify the output.
#time, img = cvSink.grabFrame(img)
ret_val, img = self.camera.read()
timestamp = datetime.datetime.now() #Close but not exact, need to work out better sync
if ret_val == 0:
self._grabbed = False
# Send the output the error.
#self.outstream.notifyError(cvSink.getError())
# skip the rest of the current iteration
continue
self._grabbed = True
self.count = self.count
self.duration.start()
self.fps.update()
            # if something was grabbed and retrieved then lock
            # the outbound buffer for the update
            # This limits the blocking to just the copy operations
            # later we may consider a queue or double buffer to
            # minimize blocking
if (self._grabbed == True):
timestamp_string = datetime.datetime.fromtimestamp(timestamp.timestamp(),datetime.timezone.utc).isoformat()
self._condition.acquire()
self._lock.acquire()
self.count = self.count + 1
self.grabbed = self._grabbed
self.frame = img.copy()
self.timestamp = timestamp_string
self._lock.release()
self._condition.notifyAll()
self._condition.release()
self.duration.update()
print("BucketCapture for " + self.name + " STOPPING")
def read(self):
# return the frame most recently read if the frame
# is not being updated at this exact moment
self._condition.acquire()
self._condition.wait()
self._condition.release()
if (self._lock.acquire() == True):
self.outFrame = self.frame
self.outCount = self.count
self.outTimestamp = self.timestamp
self._lock.release()
return (self.outFrame, self.outCount, self.outTimestamp, True)
else:
return (self.outFrame, self.outCount, "NoTimeStamp", False)
def processUserCommand(self, key):
# if key == ord('x'):
# return True
# elif key == ord('d'):
# self.contrast+=1
# self.stream.set(cv2.CAP_PROP_CONTRAST,self.contrast)
# print("CONTRAST = " + str(self.contrast))
# elif key == ord('a'):
# self.contrast-=1
# self.stream.set(cv2.CAP_PROP_CONTRAST,self.contrast)
# print("CONTRAST = " + str(self.contrast))
# elif key == ord('e'):
# self.saturation+=1
# self.stream.set(cv2.CAP_PROP_SATURATION,self.saturation)
# print("SATURATION = " + str(self.saturation))
# elif key == ord('q'):
# self.saturation-=1
# self.stream.set(cv2.CAP_PROP_SATURATION,self.saturation)
# print("SATURATION = " + str(self.saturation))
# el
if key == ord('z'):
self.exposure = self.exposure - 1
self.setExposure()
print("EXPOSURE = " + str(self.exposure))
elif key == ord('c'):
self.exposure = self.exposure + 1
self.setExposure()
print("EXPOSURE = " + str(self.exposure))
elif key == ord('w'):
self.brightness+=1
self.camera.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
print("BRIGHT = " + str(self.brightness))
elif key == ord('s'):
self.brightness-=1
self.camera.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
print("BRIGHT = " + str(self.brightness))
elif key == ord('p'):
self.iso = self.iso + 100
self.camera.set(cv2.CAP_PROP_ISO_SPEED, self.iso)
print("ISO = " + str(self.iso))
elif key == ord('i'):
self.iso = self.iso - 100
self.camera.set(cv2.CAP_PROP_ISO_SPEED, self.iso)
print("ISO = " + str(self.iso))
return False
def updateExposure(self, exposure):
self.exposure = exposure
def setExposure(self):
self.camera.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
pass
def stop(self):
# indicate that the thread should be stopped
self._stop = True
self._condition.acquire()
self._condition.notifyAll()
self._condition.release()
def isStopped(self):
return self.stopped
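# A minimal usage sketch (camera index, resolution and exposure below are illustrative values only):
#
#   cap = BucketCapture(name="front", src=0, width=640, height=480, exposure=10).start()
#   try:
#       while True:
#           frame, count, timestamp, ok = cap.read()  # blocks until a new frame is published
#           if not ok or frame is None:
#               continue
#           # ... process frame ...
#   finally:
#       cap.stop()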
|
testMP.py
|
#!/usr/bin/env python
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(10)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
    print('Unordered results:')
    for i in range(len(TASKS1)):
        print('\t', done_queue.get())
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
        print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
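# As the header note suggests, `Pool.map()` / `Pool.starmap()` return results in task order
# with less code. A rough sketch of the first batch of tasks, reusing the `mul` helper above:
#
#   from multiprocessing import Pool, freeze_support
#
#   if __name__ == '__main__':
#       freeze_support()
#       with Pool(processes=4) as pool:
#           for result in pool.starmap(mul, [(i, 7) for i in range(10)]):
#               print(result)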
|
Saltadder.py
|
import os
import sys
import argparse
import threading
pasr = argparse.ArgumentParser(description="Add a salt to every password in a password list", epilog='''Usage instruction:
./saltadder -s salt -p passwordlist -t threads -l 0 -o OutputFile ''')
pasr.add_argument("-s",dest="Salt",help="Specify Salt",required=True,type=str)
pasr.add_argument("-p",dest="password",help="Specify passwordList Location",required=True)
pasr.add_argument("-l",dest="place",help="Specify where to add the salt: front=0, back=1, both=2",required=True,type=int)
pasr.add_argument("-t",dest="Threads",help="Specify Threads to run",type=int)
pasr.add_argument("-o",dest="Output",help="Specify Output file name",required=True)
args =pasr.parse_args()
print (args)
try:
    with open(args.password, "r") as pw_file:
        f = pw_file.readlines()
except Exception:
    sys.exit("[*] Your password file was not found")
sa=args.Salt
name=args.place
rg=args.Threads
def addpo(na):
    # Create the output file on first use, then append the salted entry.
    if not os.path.exists(args.Output):
        open(args.Output, "x").close()
    with open(args.Output, "a") as f1:
        if name == 0:
            out = sa + na
        elif name == 1:
            out = na + sa
        elif name == 2:
            out = sa + na + sa
        else:
            return
        f1.write(out + "\n")
def notrd():
for sd in f:
sd=sd.rstrip()
addpo(sd)
def thread(count):
    # Each thread handles an equal, non-overlapping slice of the wordlist.
    chunk = (len(f) + rg - 1) // rg
    start = count * chunk
    for sd in f[start:start + chunk]:
        addpo(sd.rstrip())
if rg is None:
notrd()
else:
for apl in range(rg):
t1=threading.Thread(target=thread,args=(apl,),name="t1")
t1.start()
|
serve_normalize.py
|
"""
sentry.management.commands.serve_normalize
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2018 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import SocketServer
import base64
import os
import stat
import sys
import time
import traceback
import json
import resource
import multiprocessing
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.encoding import force_str
class ForkingUnixStreamServer(SocketServer.ForkingMixIn, SocketServer.UnixStreamServer):
pass
def catch_errors(f):
def wrapper(*args, **kwargs):
error = None
try:
return f(*args, **kwargs)
except Exception as e:
error = force_str(e.message) + " " + force_str(traceback.format_exc())
try:
return encode({"result": None, "error": error, "metrics": None})
except (ValueError, TypeError) as e:
try:
# Encoding error, try to send the exception instead
return encode(
{
"result": None,
"error": force_str(e.message) + " " + force_str(traceback.format_exc()),
"metrics": None,
"encoding_error": True,
}
)
except Exception:
return b"{}"
return wrapper
# Here's where the normalization itself happens
def process_event(data, meta):
from sentry.event_manager import EventManager
from sentry.tasks.store import should_process
event_manager = EventManager(
data,
client_ip=meta.get("REMOTE_ADDR"),
user_agent=meta.get("HTTP_USER_AGENT"),
auth=None,
key=None,
content_encoding=meta.get("HTTP_CONTENT_ENCODING"),
)
event_manager.normalize()
event = event_manager.get_data()
group_hash = None
if not should_process(event):
group_hash = event_manager._get_event_instance(project_id=1).get_hashes()
return {"event": dict(event), "group_hash": group_hash}
def decode(message):
meta, data_encoded = json.loads(message)
data = base64.b64decode(data_encoded)
return data, meta
def encode(data):
# Normalized data should be serializable
return json.dumps(data)
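# Illustrative note (our own addition, not part of the original module): given
# decode() above, a client sends a single JSON array of [meta, base64(raw event)],
# for example (names and values here are made up):
#
#     payload = json.dumps([{"REMOTE_ADDR": "127.0.0.1"},
#                           base64.b64encode('{"message": "hi"}')])
#     # write `payload` to the unix or TCP socket and close the write side;
#     # the server replies with the structure built by encode() in handle_data().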
@catch_errors
def handle_data(data):
mc = MetricCollector()
metrics_before = mc.collect_metrics()
data, meta = decode(data)
rv = process_event(data, meta)
metrics_after = mc.collect_metrics()
return encode(
{"result": rv, "metrics": {"before": metrics_before, "after": metrics_after}, "error": None}
)
def handle_data_piped(pipe, data):
pipe.send(handle_data(data))
class MetricCollector(object):
def __init__(self):
self.is_linux = sys.platform.startswith("linux")
self.pid = os.getpid()
def collect_metrics(self):
metrics = {"time": time.time()}
usage = resource.getrusage(resource.RUSAGE_SELF)
usage_dict = {attr: getattr(usage, attr) for attr in dir(usage) if attr.startswith("ru_")}
metrics.update(usage_dict)
if self.is_linux:
with open("/proc/{}/status".format(self.pid)) as procfh:
metrics["proc"] = procfh.read()
return metrics
class EventNormalizeHandler(SocketServer.BaseRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
BUFFER_SIZE = 4096
SOCKET_TIMEOUT = 10.0
def handle(self):
self.server.socket.settimeout(self.SOCKET_TIMEOUT)
chunks = []
# Receive the data
while True:
rcvd = self.request.recv(self.BUFFER_SIZE)
if rcvd is None:
raise ValueError("Received None")
if not rcvd:
break
chunks.append(rcvd)
self.data = "".join(chunks)
response = self.handle_data()
self.request.sendall(response)
self.request.close()
def handle_data(self):
@catch_errors
def inner():
# TODO: Remove this contraption once we no longer get segfaults
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(target=handle_data_piped, args=(child_conn, self.data))
p.start()
p.join(1)
assert parent_conn.poll(), "Process crashed"
return parent_conn.recv()
return inner()
class Command(BaseCommand):
help = "Start a socket server for event normalization"
option_list = BaseCommand.option_list + (
make_option(
"--unix",
dest="socket_file",
help='Unix socket to bind to. Example: "/tmp/normalize.sock"',
),
make_option(
"--net",
dest="network_socket",
help='Network socket to bind to. Example: "127.0.0.1:1234"',
),
make_option(
"--threading", action="store_true", dest="threading", help="Start a threading server"
),
make_option(
"--forking", action="store_true", dest="forking", help="Start a forking server"
),
)
def _check_socket_path(self, socket_file):
if os.path.exists(socket_file):
file_mode = os.stat(socket_file).st_mode
if not stat.S_ISSOCK(file_mode):
raise CommandError("File already exists and is not a socket")
# Make sure the socket does not already exist
try:
os.unlink(socket_file)
except OSError:
if os.path.exists(socket_file):
raise
def handle(self, **options):
socket_file = options.get("socket_file")
network_socket = options.get("network_socket")
threading = options.get("threading")
forking = options.get("forking")
if threading and forking:
raise CommandError("Pick one: threading or forking.")
if socket_file and network_socket:
raise CommandError("Only one socket allowed at a time")
if threading:
server_type = "threading"
elif forking:
server_type = "forking"
else:
server_type = "single-threaded"
self.stdout.write("Server type: %s\n" % (server_type,))
if socket_file:
self.socket_file = os.path.abspath(socket_file)
self._check_socket_path(socket_file)
self.stdout.write("Binding to unix socket: %s\n" % (socket_file,))
if threading:
server = SocketServer.ThreadingUnixStreamServer(socket_file, EventNormalizeHandler)
server.daemon_threads = True
elif forking:
server = ForkingUnixStreamServer(socket_file, EventNormalizeHandler)
else:
server = SocketServer.UnixStreamServer(socket_file, EventNormalizeHandler)
elif network_socket:
host, port = network_socket.split(":")
port = int(port)
self.stdout.write("Binding to network socket: %s:%s\n" % (host, port))
if threading:
server = SocketServer.ThreadingTCPServer((host, port), EventNormalizeHandler)
server.daemon_threads = True
elif forking:
server = SocketServer.ForkingTCPServer((host, port), EventNormalizeHandler)
else:
server = SocketServer.TCPServer((host, port), EventNormalizeHandler)
else:
raise CommandError("No connection option specified")
server.serve_forever()
|
run_cmd.py
|
import argparse
from threading import Thread
from scrabble.engine import ClientEngine, ReplayEngine, ServerEngine
def init_parser():
parser = argparse.ArgumentParser(description='Scrabble game', prog='scrabble')
subparsers = parser.add_subparsers(required=True)
server = subparsers.add_parser('host', help='Server part')
server.add_argument('--port', type=str, help='Server port', default='5678')
server.add_argument('--host', type=str, help='Server host', default=None)
server.set_defaults(mode='host')
client = subparsers.add_parser('player', help='Player part')
client.add_argument('username', type=str, help='Player username')
client.add_argument('game_id', type=int, help='Game ID')
client.add_argument('host', type=str, help='Host address or IP to connect')
client.add_argument('port', type=int, help='Host port to connect')
client.set_defaults(mode='player')
tester = subparsers.add_parser('replay', help='Replay game events')
tester.add_argument('game_id', type=int, help='Game ID')
tester.add_argument('events_file', type=str, help='File with game events')
tester.add_argument('--sequence', type=int, help='Event sequence to stop at')
tester.add_argument('--player', type=str, default='__tester__', help='Player of the game')
tester.set_defaults(mode='replay')
return parser
if __name__ == '__main__':
parser = init_parser()
args = parser.parse_args()
if args.mode == 'host':
server_engine = ServerEngine()
server_engine.run_with_cmd(host=args.host, port=args.port)
elif args.mode == 'player':
client_engine = ClientEngine(args.username, args.game_id)
t = Thread(target=client_engine.run, args=(args.host, args.port))
t.start()
t.join()
elif args.mode == 'replay':
replay_engine = ReplayEngine(args.game_id, args.events_file, args.player,
sequence=args.sequence)
t = Thread(target=replay_engine.run)
t.start()
t.join()
|
utils.py
|
from bitcoin.core import COIN # type: ignore
from bitcoin.rpc import RawProxy as BitcoinProxy # type: ignore
from bitcoin.rpc import JSONRPCError
from contextlib import contextmanager
from pathlib import Path
from pyln.client import RpcError
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve # type: ignore
from pyln.client import ThorRpc
from pyln.client import Millisatoshi
import json
import logging
import lzma
import math
import os
import psutil # type: ignore
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
import warnings
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"fallbackfee": Decimal(1000) / COIN,
}
THORD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
FUNDAMOUNT = 10**6
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-thor's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
lines = open(fname, 'r').readlines()
config = dict([(line.rstrip().split('=', 1)) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
EXPERIMENTAL_DUAL_FUND = env("EXPERIMENTAL_DUAL_FUND", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success():
time_left = start_time + timeout - time.time()
if time_left <= 0:
            raise ValueError("Timeout while waiting for {}".format(success))
time.sleep(min(interval, time_left))
interval *= 2
if interval > 5:
interval = 5
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def mine_funding_to_announce(bitcoind, nodes, num_blocks=5, wait_for_mempool=0):
"""Mine blocks so a channel can be announced (5, if it's already
mined), but make sure we don't leave nodes behind who will reject the
announcement. Not needed if there are only two nodes.
"""
bitcoind.generate_block(num_blocks - 1, wait_for_mempool)
sync_blockheight(bitcoind, nodes)
bitcoind.generate_block(1)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
self.err_logs = []
self.prefix = ""
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
line = line.decode('UTF-8', 'replace').rstrip()
if self.log_filter(line):
continue
if self.verbose:
sys.stdout.write("{}: {}\n".format(self.prefix, line))
with self.logs_cond:
self.logs.append(line)
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
for line in iter(self.proc.stderr.readline, ''):
if line is None or len(line) == 0:
break
line = line.rstrip().decode('UTF-8', 'replace')
self.err_logs.append(line)
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def is_in_stderr(self, regex):
"""Look for `regex` in stderr."""
ex = re.compile(regex)
for l in self.err_logs:
if ex.search(l):
logging.debug("Found '%s' in stderr", regex)
return l
logging.debug("Did not find '%s' in stderr", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
The logs contain tailed stdout of the process. We look for each regex
in `regexs`, starting from `logsearch_start` which normally is the
position of the last found entry of a previous wait-for logs call.
The ordering inside `regexs` doesn't matter.
We fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
with self.logs_cond:
if pos >= len(self.logs):
if not self.running:
raise ValueError('Process died while waiting for logs')
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
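# Illustrative sketch (our own addition, not part of the original test suite):
# TailableProc is meant to be subclassed by the daemon wrappers below
# (BitcoinD, ElementsD, ThorD).  A minimal hypothetical subclass only needs a
# `cmd_line` and a log `prefix`; tailing, log storage and `wait_for_log`
# matching are inherited:
#
#     class EchoD(TailableProc):
#         def __init__(self, output_dir):
#             TailableProc.__init__(self, output_dir, verbose=True)
#             self.cmd_line = ['bash', '-c', 'echo started; sleep 30']
#             self.prefix = 'echod'
#
#     d = EchoD('/tmp/echod-test')
#     d.start()
#     d.wait_for_log('started')   # blocks until the regex appears on stdout
#     d.stop()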
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
logging.debug("Calling {name} with arguments {args}".format(
name=name,
args=args
))
res = proxy._call(name, *args)
logging.debug("Result for {name} call: {res}".format(
name=name,
res=res,
))
return res
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
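# Illustrative note (our own addition): attribute access on SimpleBitcoinProxy
# builds a fresh RPC callable per call, e.g. (the path below is an example):
#
#     rpc = SimpleBitcoinProxy(btc_conf_file='/tmp/bitcoind-test/bitcoin.conf')
#     rpc.getblockcount()     # opens a new connection, calls "getblockcount"
#     rpc.getblockhash(0)     # another throwaway connection
#
# Each call creates and discards a BitcoinProxy, which is what makes long idle
# gaps between RPC calls safe.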
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-nowallet',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
try:
self.rpc.createwallet("thord-tests")
except JSONRPCError:
self.rpc.loadwallet("thord-tests")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
    # wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
def generate_block(self, numblocks=1, wait_for_mempool=0, to_addr=None):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
mempool = self.rpc.getrawmempool()
logging.debug("Generating {numblocks}, confirming {lenmempool} transactions: {mempool}".format(
numblocks=numblocks,
mempool=mempool,
lenmempool=len(mempool),
))
# As of 0.16, generate() is removed; use generatetoaddress.
if to_addr is None:
to_addr = self.rpc.getnewaddress()
return self.rpc.generatetoaddress(numblocks, to_addr)
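    # Illustrative usage of generate_block() above (our own addition);
    # `txid`, `txid1`, `txid2` are hypothetical placeholders for real ids:
    #     bitcoind.generate_block(1)                               # just mine
    #     bitcoind.generate_block(1, wait_for_mempool=1)           # >= 1 tx first
    #     bitcoind.generate_block(1, wait_for_mempool=txid)        # one specific txid
    #     bitcoind.generate_block(6, wait_for_mempool=[txid1, txid2])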
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
Note that tx's that become invalid at [height] (because coin maturity, locktime
etc.) are removed from mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-nowallet',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class ThorD(TailableProc):
def __init__(self, thor_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, thor_dir)
self.executable = 'thord'
self.thor_dir = thor_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = THORD_CONFIG.copy()
opts = {
'thor-dir': thor_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': '{}'.format("true" if DEPRECATED_APIS
else "false"),
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
# Make sure we don't touch any existing config files in the user's $HOME
'bitcoin-datadir': thor_dir,
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(thor_dir, TEST_NETWORK)):
os.makedirs(os.path.join(thor_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', thor_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(thor_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'thord-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("ThorD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class PrettyPrintingThorRpc(ThorRpc):
"""A version of the ThorRpc that pretty-prints calls and results.
Useful when debugging based on logs, and less painful to the
eyes. It has some overhead since we re-serialize the request and
result to json in order to pretty print it.
Also validates (optional) schemas for us.
"""
def __init__(self, socket_path, executor=None, logger=logging,
patch_json=True, jsonschemas={}):
super().__init__(
socket_path,
executor,
logger,
patch_json,
)
self.jsonschemas = jsonschemas
def call(self, method, payload=None):
id = self.next_id
self.logger.debug(json.dumps({
"id": id,
"method": method,
"params": payload
}, indent=2))
res = ThorRpc.call(self, method, payload)
self.logger.debug(json.dumps({
"id": id,
"result": res
}, indent=2))
if method in self.jsonschemas:
self.jsonschemas[method].validate(res)
return res
class ThorNode(object):
def __init__(self, node_id, thor_dir, bitcoind, executor, valgrind, may_fail=False,
may_reconnect=False,
allow_broken_log=False,
allow_warning=False,
allow_bad_gossip=False,
db=None, port=None, disconnect=None, random_hsm=None, options=None,
jsonschemas={},
valgrind_plugins=True,
**kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.allow_warning = allow_warning
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(thor_dir, TEST_NETWORK, "thor-rpc").format(node_id)
self.rpc = PrettyPrintingThorRpc(socket_path, self.executor, jsonschemas=jsonschemas)
self.daemon = ThorD(
thor_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(thor_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
# Don't run --version on every subdaemon if we're valgrinding and slow.
if SLOW_MACHINE and VALGRIND:
self.daemon.opts["dev-no-version-checks"] = None
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if valgrind:
self.daemon.env["THORD_DEV_NO_BACKTRACE"] = "1"
self.daemon.opts["dev-no-plugin-checksum"] = None
else:
# Under valgrind, scanning can access uninitialized mem.
self.daemon.env["THORD_DEV_MEMLEAK"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if EXPERIMENTAL_DUAL_FUND:
self.daemon.opts["experimental-dual-fund"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if valgrind:
trace_skip_pattern = '*python*,*bitcoin-cli*,*elements-cli*'
if not valgrind_plugins:
trace_skip_pattern += ',*plugins*'
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip={}'.format(trace_skip_pattern),
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.thor_dir)
]
# Reduce precision of errors, speeding startup and reducing memory greatly:
if SLOW_MACHINE:
self.daemon.cmd_prefix += ['--read-inline-info=no']
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
res = self.rpc.fundchannel(remote_node.info['id'], capacity)
if confirm or wait_for_announce:
self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])
if wait_for_announce:
self.bitcoin.generate_block(5)
            wait_for(lambda: ['alias' in e for e in self.rpc.listnodes(remote_node.info['id'])['nodes']] == [True])
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': res['tx']}
def fundwallet(self, sats, addrtype="p2sh-segwit", mine_block=True):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
if mine_block:
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
'''
Creates a perfectly-balanced channel, as all things should be.
'''
if isinstance(total_capacity, Millisatoshi):
total_capacity = int(total_capacity.to_satoshi())
else:
total_capacity = int(total_capacity)
self.fundwallet(total_capacity + 10000)
if remote_node.config('experimental-dual-fund'):
remote_node.fundwallet(total_capacity + 10000)
# We cut the total_capacity in half, since the peer's
# expected to contribute that same amount
chan_capacity = total_capacity // 2
total_capacity = chan_capacity * 2
# Tell the node to equally dual-fund the channel
remote_node.rpc.call('funderupdate', {'policy': 'match',
'policy_mod': 100,
'fuzz_percent': 0})
else:
chan_capacity = total_capacity
self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)
res = self.rpc.fundchannel(remote_node.info['id'], chan_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(chan_capacity * 500))
blockid = self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])[0]
# Generate the scid.
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
return '{}x{}x{}'.format(self.bitcoin.rpc.getblockcount(), txnum, res['outnum'])
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.thor_dir, TEST_NETWORK, "thord.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_thord_sync' not in info
def start(self, wait_for_bitcoind_sync=True, stderr=None):
self.daemon.start(stderr=stderr)
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the thor node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
warnings.warn("ThorNode.fund_channel is deprecated in favor of "
"ThorNode.fundchannel", category=DeprecationWarning)
return self.fundchannel(l2, amount, wait_for_active, announce_channel)
def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
announce_channel=True, **kwargs):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
def has_funds_on_addr(addr):
"""Check if the given address has funds in the internal wallet.
"""
outs = self.rpc.listfunds()['outputs']
addrs = [o['address'] for o in outs]
return addr in addrs
# We should not have funds on that address yet, we just generated it.
assert(not has_funds_on_addr(addr))
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
self.bitcoin.generate_block(1)
# Now we should.
wait_for(lambda: has_funds_on_addr(addr))
# Now go ahead and open a channel
res = self.rpc.fundchannel(l2.info['id'], amount,
announce=announce_channel,
**kwargs)
blockid = self.bitcoin.generate_block(1, wait_for_mempool=res['txid'])[0]
for i, txid in enumerate(self.bitcoin.rpc.getblock(blockid)['tx']):
if txid == res['txid']:
txnum = i
scid = "{}x{}x{}".format(self.bitcoin.rpc.getblockcount(),
txnum, res['outnum'])
if wait_for_active:
self.wait_channel_active(scid)
l2.wait_channel_active(scid)
return scid, res
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def get_channel_id(self, other):
"""Get the channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=TIMEOUT):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
# This helper waits for all HTLCs to settle
# `scids` can be a list of strings. If unset wait on all channels.
def wait_for_htlcs(self, scids=None):
peers = self.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if scids is not None and channel['short_channel_id'] not in scids:
continue
if 'htlcs' in channel:
wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)
# This sends money to a directly connected peer
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
# check we are connected
dst_id = dst.info['id']
assert len(self.rpc.listpeers(dst_id).get('peers')) == 1
# make an invoice
inv = dst.rpc.invoice(amt, label, label)
# FIXME: pre 0.10.1 invoice calls didn't have payment_secret field
psecret = dst.rpc.decodepay(inv['bolt11'])['payment_secret']
rhash = inv['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst_id,
'delay': 5,
'channel': '1x1x1' # note: can be bogus for 1-hop direct payments
}
# sendpay is async now
self.rpc.sendpay([routestep], rhash, payment_secret=psecret)
# wait for sendpay to comply
result = self.rpc.waitsendpay(rhash)
assert(result.get('status') == 'complete')
# Make sure they're all settled, in case we quickly mine blocks!
dst.wait_for_htlcs()
# This helper sends all money to a peer until even 1 msat can't get through.
def drain(self, peer):
total = 0
msat = 4294967295 # Max payment size in some configs
while msat != 0:
try:
logging.debug("Drain step with size={}".format(msat))
self.pay(peer, msat)
total += msat
except RpcError as e:
logging.debug("Got an exception while draining channel: {}".format(e))
msat //= 2
logging.debug("Draining complete after sending a total of {}msats".format(total))
return total
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [6, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [12, 'ECONOMICAL']:
feerate = feerates[2] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[3] * 4
else:
warnings.warn("Don't have a feerate set for {}/{}.".format(
params[0], params[1],
))
feerate = 42
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda:
self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)
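    # Illustrative note on set_feerates() above (our own addition): the four
    # entries of `feerates` map, in order, to the mocked estimatesmartfee
    # targets (2, 'CONSERVATIVE'), (6, 'ECONOMICAL'), (12, 'ECONOMICAL') and
    # (100, 'ECONOMICAL'), e.g. node.set_feerates((15000, 11000, 7500, 3750)).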
# force new feerates by restarting and thus skipping slow smoothed process
# Note: testnode must be created with: opts={'may_reconnect': True}
def force_feerates(self, rate):
assert(self.may_reconnect)
self.set_feerates([rate] * 4, False)
self.restart()
self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
def config(self, config_name):
try:
opt = self.rpc.listconfigs(config_name)
return opt[config_name]
except RpcError:
return None
@contextmanager
def flock(directory: Path):
"""A fair filelock, based on atomic fs operations.
"""
if not isinstance(directory, Path):
directory = Path(directory)
d = directory / Path(".locks")
os.makedirs(str(d), exist_ok=True)
fname = None
while True:
# Try until we find a filename that doesn't exist yet.
try:
fname = d / Path("lock-{}".format(time.time()))
fd = os.open(str(fname), flags=os.O_CREAT | os.O_EXCL)
os.close(fd)
break
except FileExistsError:
time.sleep(0.1)
# So now we have a position in the lock, let's check if we are the
# next one to go:
while True:
files = sorted([f for f in d.iterdir() if f.is_file()])
# We're queued, so it should at least have us.
assert len(files) >= 1
if files[0] == fname:
break
time.sleep(0.1)
# We can continue
yield fname
# Remove our file, so the next one can go ahead.
fname.unlink()
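# Illustrative usage (our own addition): flock() is a context manager, so
# concurrent holders queue up in timestamp order on lock files under
# <directory>/.locks:
#
#     with flock('/tmp/test-workdir'):
#         pass  # at most one process at a time executes this block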
class Throttler(object):
"""Throttles the creation of system-processes to avoid overload.
There is no reason to overload the system with too many processes
being spawned or run at the same time. It causes timeouts by
aggressively preempting processes and swapping if the memory limit is
reached. In order to reduce this loss of performance we provide a
`wait()` method which will serialize the creation of processes, but
also delay if the system load is too high.
Notice that technically we are throttling too late, i.e., we react
to an overload, but chances are pretty good that some other
already running process is about to terminate, and so the overload
is short-lived. We throttle when the process object is first
created, not when restarted, in order to avoid delaying running
tests, which could cause more timeouts.
"""
def __init__(self, directory: str, target: float = 90):
"""If specified we try to stick to a load of target (in percent).
"""
self.target = target
self.current_load = self.target # Start slow
psutil.cpu_percent() # Prime the internal load metric
self.directory = directory
def wait(self):
start_time = time.time()
with flock(self.directory):
# We just got the lock, assume someone else just released it
self.current_load = 100
while self.load() >= self.target:
time.sleep(1)
self.current_load = 100 # Back off slightly to avoid triggering right away
print("Throttler delayed startup for {} seconds".format(time.time() - start_time))
def load(self):
"""An exponential moving average of the load
"""
decay = 0.5
load = psutil.cpu_percent()
self.current_load = decay * load + (1 - decay) * self.current_load
return self.current_load
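# Illustrative usage (our own addition): NodeFactory below calls wait() before
# spinning up each node; standalone it would look like
#
#     throttler = Throttler('/tmp/test-workdir', target=90)
#     throttler.wait()   # blocks until the smoothed CPU load drops below 90%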
class NodeFactory(object):
"""A factory to setup and start `thord` daemons.
"""
def __init__(self, request, testname, bitcoind, executor, directory,
db_provider, node_cls, throttler, jsonschemas):
if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
self.valgrind = False
else:
self.valgrind = VALGRIND
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
self.throttler = throttler
self.jsonschemas = jsonschemas
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'allow_warning',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip',
'start',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
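    # Illustrative example for split_options() above (our own addition):
    #     node_opts, cli_opts = self.split_options(
    #         {'may_reconnect': True, 'log-level': 'debug'})
    #     # -> node_opts == {'may_reconnect': True}
    #     #    cli_opts  == {'log-level': 'debug'}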
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a thor node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
# Only trace one random node's plugins, to avoid OOM.
if SLOW_MACHINE:
valgrind_plugins = [False] * num_nodes
valgrind_plugins[random.randint(0, num_nodes - 1)] = True
else:
valgrind_plugins = [True] * num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts,
valgrind_plugins=valgrind_plugins[i]
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 11000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, may_fail=False,
expect_fail=False, cleandir=True, **kwargs):
self.throttler.wait()
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
thor_dir = os.path.join(
self.directory, "thor-{}/".format(node_id))
if cleandir and os.path.exists(thor_dir):
shutil.rmtree(thor_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(thor_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, thor_dir, self.bitcoind, self.executor, self.valgrind, db=db,
port=port, options=options, may_fail=may_fail or expect_fail,
jsonschemas=self.jsonschemas,
**kwargs
)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.thor_dir, TEST_NETWORK,
'thord.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
# Capture stderr if we're failing
if expect_fail:
stderr = subprocess.PIPE
else:
stderr = None
node.start(wait_for_bitcoind_sync, stderr=stderr)
except Exception:
if expect_fail:
return node
node.daemon.stop()
raise
return node
def join_nodes(self, nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, announce_channels=True) -> None:
"""Given nodes, connect them in a line, optionally funding a channel, wait_for_announce waits for channel and node announcements"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
connections = [(nodes[i], nodes[i + 1]) for i in range(len(nodes) - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return
bitcoind = nodes[0].bitcoin
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txids = []
for src, dst in connections:
txids.append(src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)['txid'])
# Confirm all channels and wait for them to become usable
bitcoind.generate_block(1, wait_for_mempool=txids)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
scids.append(scid)
# Wait for all channels to be active (locally)
for i, n in enumerate(scids):
nodes[i].wait_channel_active(scids[i])
nodes[i + 1].wait_channel_active(scids[i])
if not wait_for_announce:
return
bitcoind.generate_block(5)
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
nodes[0].wait_channel_active(scids[-1])
nodes[-1].wait_channel_active(scids[0])
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
def line_graph(self, num_nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
nodes = self.get_nodes(num_nodes, opts=opts)
self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
return nodes
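    # Illustrative usage (our own addition): with a hypothetical `node_factory`
    # fixture built from this class, a test would typically do
    #     l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
    # giving three nodes connected in a line with two funded, announced
    # channels l1 <-> l2 <-> l3.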
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not self.valgrind and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.thor_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
|
mp_process05.py
|
from multiprocessing import Process, Pipe
def func(conn):
conn.send([42, None, 'hello'])
conn.close()
if __name__ == '__main__':
parent_conn, child_conn = Pipe()
p = Process(target=func, args=(child_conn,))
p.start()
print(parent_conn.recv()) # prints "[42, None, 'hello']"
p.join()
|
animation.py
|
from threading import Thread
import time
import pygame
from pygame_widgets import Button
class AnimationBase:
def __init__(self, widget, timeout, allowMultiple=False, **kwargs):
"""Base for animations
:param widget: The widget that the animation targets
        :param timeout: The duration of the animation in seconds
:param kwargs:
"""
self.widget = widget
self.timeout = timeout
self.allowMultiple = allowMultiple
self.params = kwargs
self.thread = Thread()
self.started = False
self.runOnce = False
def start(self):
if not self.started and not (self.runOnce and not self.allowMultiple):
self.thread = Thread(target=self.loop)
self.thread.start()
def loop(self):
self.started = self.runOnce = True
start = time.time()
initialParams = {}
for param, target in self.params.items():
initialParams[param] = self.widget.get(param)
# Animate
while time.time() - start < self.timeout:
step = (time.time() - start) / self.timeout
for param, target in self.params.items():
newValue = initialParams[param] + step * (target - initialParams[param])
self.widget.set(param, newValue)
# Ensure value is exactly correct at end
for param, target in self.params.items():
self.widget.set(param, target)
self.started = False
class Translate(AnimationBase):
def __init__(self, widget, timeout, x, y):
super().__init__(widget, timeout, x=x, y=y)
class Resize(AnimationBase):
def __init__(self, widget, timeout, width, height):
super().__init__(widget, timeout, width=width, height=height)
if __name__ == '__main__':
def animate():
resize.start()
translate.start()
pygame.init()
win = pygame.display.set_mode((600, 600))
button = Button(win, 100, 100, 300, 150)
resize = Resize(button, 3, 200, 200)
translate = Translate(button, 3, 200, 200)
button.setOnClick(animate)
run = True
while run:
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
run = False
quit()
win.fill((255, 255, 255))
button.listen(events)
button.draw()
pygame.display.update()
|
quick_chats.py
|
import flatbuffers
import multiprocessing
import queue
from threading import Thread
from rlbot.messages.flat import QuickChat
from rlbot.messages.flat import QuickChatSelection
from rlbot.utils.logging_utils import get_logger
def get_quick_chats():
"""
Look for quick chats from here:
https://github.com/RLBot/RLBot/blob/master/src/main/flatbuffers/rlbot.fbs
"""
result = lambda: None
obj = QuickChatSelection.QuickChatSelection
quick_chat_list = [a for a in dir(obj) if not a.startswith('__') and not callable(getattr(obj,a))]
for i in range(len(quick_chat_list)):
setattr(result, quick_chat_list[i], getattr(obj, quick_chat_list[i]))
setattr(result, 'quick_chat_list', quick_chat_list)
setattr(result, 'CHAT_NONE', -1)
setattr(result, 'CHAT_EVERYONE', False)
setattr(result, 'CHAT_TEAM_ONLY', True)
return result
QuickChats = get_quick_chats()
def send_quick_chat_flat(game_interface, index, team, team_only, quick_chat):
builder = flatbuffers.Builder(0)
QuickChat.QuickChatStart(builder)
QuickChat.QuickChatAddQuickChatSelection(builder, quick_chat)
QuickChat.QuickChatAddPlayerIndex(builder, index)
QuickChat.QuickChatAddTeamOnly(builder, team_only)
result = QuickChat.QuickChatEnd(builder)
builder.Finish(result)
game_interface.send_chat_flat(builder)
def send_quick_chat(queue_holder, index, team, team_only, quick_chat):
"""
Sends a quick chat to the general queue for everyone to pull from
:param queue_holder:
:param index: The index of the player sending the message
:param team: The team of the player sending the message
:param team_only: if the message is team only
:param quick_chat: The contents of the quick chat
:return:
"""
queue_holder["output"].put((index, team, team_only, quick_chat))
def register_for_quick_chat(queue_holder, called_func, quit_event):
"""
Registers a function to be called anytime this queue gets a quick chat.
:param queue_holder: This holds the queues for the bots
:param called_func: This is the function that is called when a quick chat is received
:param quit_event: This event will be set when rlbot is trying to shut down
:return: The newly created thread.
"""
def threaded_func(chat_queue, called_func, quit_event):
while not quit_event.is_set():
try:
next_message = chat_queue.get(timeout=0.01)
index, team, chat = next_message
called_func(index, team, chat)
except queue.Empty:
pass
return
thread = Thread(target=threaded_func, args=(queue_holder["input"], called_func, quit_event))
thread.start()
return thread
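# Illustrative usage (our own addition); the callback name is our own and, in
# practice, rlbot supplies `queue_holder` and `quit_event`:
#
#     def on_chat(index, team, chat):
#         print(index, team, chat)
#
#     register_for_quick_chat(queue_holder, on_chat, quit_event)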
class QuickChatManager:
bot_queues = {}
def __init__(self, game_interface):
self.game_interface = game_interface
self.manager = multiprocessing.Manager()
self.general_chat_queue = self.manager.Queue()
self.logger = get_logger('chats')
def create_queue_for_bot(self, index, team):
bot_queue = self.manager.Queue()
queue_holder = dict()
queue_holder["input"] = bot_queue
queue_holder["output"] = self.general_chat_queue
self.bot_queues[index] = (team, bot_queue)
return queue_holder
def process_queue(self, quit_event):
while not quit_event.is_set():
try:
next_message = self.general_chat_queue.get(timeout=0.01)
index, team, team_only, message_details = next_message
self.logger.debug('got quick chat from bot %s on team %s with message %s:', index, team,
QuickChats.quick_chat_list[message_details])
for i in self.bot_queues:
bots = self.bot_queues[i]
if i == index:
# do not send yourself a message
continue
if bots[0] != team and team_only:
# do not send to other team if team only
continue
bots[1].put((index, team, message_details))
self.game_interface.send_chat(index, team_only, message_details)
except queue.Empty:
pass
def start_manager(self, quit_event):
thread = Thread(target=self.process_queue, args=(quit_event,))
thread.start()
return thread
|
Main.py
|
# -*- coding: UTF-8 -*-
import os
from multiprocessing import Process
from wx.lib.agw import ultimatelistctrl as ULC
from webserver import PythonWebServer
from images import images as image
from presenter.presenter import *
from task.task import Task
import lang.lang as LANG
import tool.tools as tool
USE_GENERIC = 0
if USE_GENERIC:
from wx.lib.stattext import GenStaticText as StaticText
else:
StaticText = wx.StaticText
SPLASH_WIDTH = 400
SPLASH_HEIGHT = 250
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 500
# ---------------------------------------------------------------------------
__sWebProcess = None
def opj(path):
"""Convert paths to the platform-specific separator"""
    st = os.path.join(*path.split('/'))
# HACK: on Linux, a leading / gets lost...
if path.startswith('/'):
st = '/' + st
return st
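# Illustrative example (not part of the original source): expected behaviour of
# opj() on Windows, where os.sep is '\\' (the paths shown are hypothetical):
#   opj('images/icons/app.png')  ->  'images\\icons\\app.png'
#   opj('/usr/share/app')        ->  '/usr\\share\\app'   (leading '/' re-attached)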
def asyncStartWebServer(start_callback, stop_callback):
global __sWebProcess
if __sWebProcess and __sWebProcess.is_alive():
print "__sWebProcess is running"
return False
__sWebProcess = Process(target=__asyncStartWebServer, args=(start_callback, stop_callback))
__sWebProcess.daemon = True
__sWebProcess.start()
def __asyncStartWebServer(start_callback=None, stop_callback=None):
PythonWebServer.addServerStartListener(start_callback)
PythonWebServer.addServerStopedListener(stop_callback)
PythonWebServer.startServer()
# ---------------------------------------------------------------------------
def main():
try:
demoPath = os.path.dirname(__file__)
os.chdir(demoPath)
except:
pass
app = Application(False)
app.MainLoop()
# --------------------------------------------------------------------------
class Application(wx.App):
"""
应用启动开始
WxPython 的 Application 实例
"""
def OnInit(self):
self.SetAppName(LANG.app_name)
splash = SplashScreen()
splash.Show()
return True
# ---------------------------------------------------------------------------
class SplashScreen(wx.SplashScreen):
"""
Splash欢迎页,启动 Web Server,用来接收网页的点击事件
由Application启动
"""
# 用于监听网页点击事件的WEB服务
global __sWebProcess
def __init__(self):
        # How long the splash screen is shown, in milliseconds
showtime = 2000
wx.SplashScreen.__init__(self,
image.app_splash.GetBitmap(),
wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT,
showtime, None, -1)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.fc = wx.FutureCall(showtime, self.ShowMain)
# register service callback
asyncStartWebServer(self.__onWebServerStarted, self.__onWebServerStopped)
# TODO TypeError: can't pickle PySwigObject objects in windows
def __onWebServerStarted(self):
print "Server Started"
# TODO TypeError: can't pickle PySwigObject objects in windows
def __onWebServerStopped(self, reason):
global __sWebProcess
print "Server stopped: ", reason
if reason == 0x01:
pass
elif reason == 0x02:
print PythonWebServer.__STOP_REASON__[reason]
pass
elif reason == 0x03:
pass
# if __sWebProcess and __sWebProcess.is_alive:
# print "__sWebProcess.terminate()"
# __sWebProcess.terminate()
def OnClose(self, evt):
# Make sure the default handler runs too so this window gets
# destroyed
evt.Skip()
self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
if self.fc.IsRunning():
self.fc.Stop()
self.ShowMain()
def ShowMain(self):
frame = MainWindow(None)
frame.Show()
if self.fc.IsRunning():
self.Raise()
# ---------------------------------------------------------------------------
class AppStatusBar(wx.StatusBar):
"""
应用的状态栏,位于应用底部,一般用来表示一些状态
"""
__Target_Field = 1
def __init__(self, parent, level='info', str=''):
wx.StatusBar.__init__(self, parent, -1)
self.__info__ = {
"info": image.web_service_info.GetBitmap(),
"success": image.web_service_success.GetBitmap(),
"error": image.web_service_error.GetBitmap()
}
self.state = level
self.parent = parent
self.string = str
# This status bar has one field
self.SetFieldsCount(2)
self.SetStatusWidths([-22, -1])
self.sizeChanged = False
self.Bind(wx.EVT_SIZE, self.__OnSize)
self.Bind(wx.EVT_IDLE, self.__OnIdle)
self.SetLevel(level=level)
self.SetString(string=str)
# set the initial position of the message
self.__Reposition()
self.Bind(wx.EVT_ENTER_WINDOW, self.__OnEnterWindow)
self.Bind(wx.EVT_LEAVE_WINDOW, self.__OnLevelWindow)
def SetLevel(self, level):
self.state = level
image = self.__getStateImage()
# self.stateIcon = wx.BitmapButton(self, -1, image ,(image.GetWidth(), image.GetHeight()))
self.stateIcon = wx.StaticBitmap(self, -1, image)
self.__Reposition()
return self
def SetString(self, string):
self.string = self.__getStateString(string)
self.SetStatusText(self.string, 0)
return self
def GetLevel(self):
return self.state
def __getStateImage(self):
return self.__info__[self.state]
def __getStateString(self, str):
# return " " + str
return str
def __OnSize(self, evt):
self.__Reposition() # for normal size events
# Set a flag so the idle time handler will also do the repositioning.
# It is done this way to get around a buglet where GetFieldRect is not
# accurate during the EVT_SIZE resulting from a frame maximize.
self.sizeChanged = True
def __OnIdle(self, evt):
if self.sizeChanged:
self.__Reposition()
# reposition the checkbox
def __Reposition(self):
rect = self.GetFieldRect(self.__Target_Field)
self.stateIcon.SetPosition((rect.x + 6, rect.y + 3))
self.stateIcon.SetSize((rect.height - 4, rect.height - 4))
self.sizeChanged = False
def __OnEnterWindow(self, evt):
print "__OnEnterWindow"
# self.SetToolTipString("jahahahahahah")
# self.stateDetail = wx.Panel(self.parent, -1, pos=(10, 400), size=(200,40))
# self.stateDetail.SetBackgroundColour('yellow')
def __OnLevelWindow(self, evt):
print "__OnLevelWindow"
# self.stateDetail.Hide()
# -----------------------------------------------------------------------------------------
class AppShowWindow(wx.SplitterWindow):
def __init__(self, parent, ID):
wx.SplitterWindow.__init__(self, parent, ID, style=wx.SP_LIVE_UPDATE)
# ------------------------------------------------------------------------------------------
class MyUlcMainWindow(ULC.UltimateListMainWindow):
def OnCompareItems(self, line1, line2):
item = ULC.UltimateListItem()
item1 = line1.GetItem(0, item)
item = ULC.UltimateListItem()
item2 = line2.GetItem(0, item)
        print hash(item1), hash(item2)
        data1 = item1.GetPyData()
        data2 = item2.GetPyData()
        print data1, data2
        # Comparison is effectively disabled: the early return below makes the
        # intended logic unreachable until a compare function (self.__func) is wired in.
        return -1
        if self.__func:
            return self.__func(data1, data2)
        else:
            return cmp(data1, data2)
# -------------------------
class MyUlc(ULC.UltimateListCtrl):
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=0, agwStyle=0, validator=wx.DefaultValidator, name="UltimateListCtrl"):
# super(MyUlc, self).__init__(parent, id, pos, size, style, agwStyle, validator, name)
self._imageListNormal = None
self._imageListSmall = None
self._imageListState = None
if not agwStyle & ULC.ULC_MASK_TYPE:
raise Exception("UltimateListCtrl style should have exactly one mode bit set")
if not (agwStyle & ULC.ULC_REPORT) and agwStyle & ULC.ULC_HAS_VARIABLE_ROW_HEIGHT:
raise Exception("Style ULC_HAS_VARIABLE_ROW_HEIGHT can only be used in report, non-virtual mode")
if agwStyle & ULC.ULC_STICKY_HIGHLIGHT and agwStyle & ULC.ULC_TRACK_SELECT:
raise Exception("Styles ULC_STICKY_HIGHLIGHT and ULC_TRACK_SELECT can not be combined")
if agwStyle & ULC.ULC_NO_HEADER and agwStyle & ULC.ULC_HEADER_IN_ALL_VIEWS:
raise Exception("Styles ULC_NO_HEADER and ULC_HEADER_IN_ALL_VIEWS can not be combined")
if agwStyle & ULC.ULC_USER_ROW_HEIGHT and (agwStyle & ULC.ULC_REPORT) == 0:
raise Exception("Style ULC_USER_ROW_HEIGHT can be used only with ULC_REPORT")
wx.PyControl.__init__(self, parent, id, pos, size, style | wx.CLIP_CHILDREN, validator, name)
self._mainWin = None
self._headerWin = None
self._footerWin = None
self._headerHeight = wx.RendererNative.Get().GetHeaderButtonHeight(self)
self._footerHeight = self._headerHeight
if wx.Platform == "__WXGTK__":
style &= ~wx.BORDER_MASK
style |= wx.BORDER_THEME
else:
if style & wx.BORDER_THEME:
style -= wx.BORDER_THEME
self._agwStyle = agwStyle
if style & wx.SUNKEN_BORDER:
style -= wx.SUNKEN_BORDER
self._mainWin = MyUlcMainWindow(self, wx.ID_ANY, wx.Point(0, 0), wx.DefaultSize, style, agwStyle)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._mainWin, 1, wx.GROW)
self.SetSizer(sizer)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.CreateOrDestroyHeaderWindowAsNeeded()
self.CreateOrDestroyFooterWindowAsNeeded()
self.SetInitialSize(size)
wx.CallAfter(self.Layout)
# ------------------------------------------------------------------
class UlcTaskList(wx.Panel):
""""""
def AddTask(self, task):
        # Insert the item first, populate it, then sort
        if task.state != Task.__STATE_WAITING__:
            tool.log("AddTask.error", "Only a task in the waiting state can be added")
return
index = self.ulc.GetItemCount()
self.ulc.InsertImageStringItem(index, task.log_path, task.state)
path_item = self.ulc.GetItem(index, 0)
path_item.SetPyData(task)
self.ulc.SetItem(path_item)
state_item = self.ulc.GetItem(index, 1)
if task.state == Task.__STATE_WAITING__:
state_item.SetText(LANG.task_state_waiting)
self.ulc.SetItem(state_item)
self.ulc.SortItems()
self.ulc.Refresh()
def UpdateTaskProgress(self, task, progress):
if task.state != Task.__STATE_PROCESSING__:
tool.log("UpdateTaskProgress.error", "Wrong State %d" % task.state)
return
index = self.ulc.FindItem(-1, task.log_path)
if index != wx.NOT_FOUND:
state_item = self.ulc.GetItem(index, 1)
gauge = state_item.GetWindow()
gauge.SetValue(min(max(0, int(progress)), 100))
def UpdateTaskState(self, task):
if task.state >= Task.__STATE_DONE__:
tool.log("UpdateTaskState.error", "Wrong State %d" % task.state)
raise Exception, "Wrong State %d" % task.state
index = self.ulc.FindItem(-1, task.log_path)
if index != wx.NOT_FOUND:
path_item = self.ulc.GetItem(index, 0)
path_item.SetImage(task.state)
path_item.SetPyData(task)
self.ulc.SetItem(path_item)
state_item = self.ulc.GetItem(index, 1)
# try:
if task.state == Task.__STATE_PROCESSING__:
state_item.SetText('')
gauge = wx.Gauge(self.ulc, -1, size=(200, 20), style=wx.GA_HORIZONTAL | wx.GA_SMOOTH)
state_item.SetWindow(gauge)
elif task.state == Task.__STATE_PAUSED__:
state_item.DeleteWindow()
state_item.SetText(LANG.task_state_paused)
elif task.state == Task.__STATE_GENERATING__:
state_item.DeleteWindow()
state_item.SetText(LANG.task_state_generating)
# except Exception as e:
# tool.log("UpdateTaskState.error", e.message)
self.ulc.SetItem(state_item)
if self.ulc.GetItemCount() > 1:
self.ulc.SortItems()
self.ulc.Refresh()
def RemoveTask(self, task):
index = self.ulc.FindItem(-1, task.log_path)
self.ulc.DeleteItem(index)
    # Sort order:
    # 1. processing => paused => generating => waiting
    # 2. processing: create time, newest => oldest
    # 3. paused: create time, newest => oldest
    # 4. waiting: create time, oldest => newest
    # NB: generating items keep their position
def OnCompareItems(self, item1, item2):
        print item1, item2
        print "OnCompareItems, item1 {0} item2 {1}".format(item1, item2)
        # ran = int(random.uniform(-10,10))
        # print "OnCompareItems :" , ran
        # Sorting is effectively disabled: this early return makes the partial
        # ordering logic below unreachable.
        return -1
# if item1.GetText()[0:1] == 'y':
# return -1
# else:
# return 1
task1 = item1.GetPyData()
task2 = item2.GetPyData()
if task1.state == Task.__STATE_PROCESSING__:
if task2.state == Task.__STATE_PROCESSING__ \
and task2.create_time:
pass
else:
return 1
def OnItemSelected(self, event):
self.currentIndex = event.m_itemIndex
print "OnItemSelected: %s, %s\n" % (self.currentIndex, self.ulc.GetItemText(self.currentIndex))
if self.ulc.GetPyData(self.currentIndex):
print ("PYDATA = %s\n" % repr(self.ulc.GetPyData(self.currentIndex)))
event.Skip()
def OnHyperTextClicked(self, event):
print "You click a hypertext"
self.currentIndex = event.m_itemIndex
item = self.ulc.GetItem(self.currentIndex, 1)
if item.GetPyData():
print ("PYDATA = %s\n" % repr(item.GetPyData()))
allcount = self.ulc.GetItemCount()
print allcount
self.ulc.InsertImageStringItem(allcount,
"/home/qinsw/pengtian/tmp/cmcc_monkey/asrlog-0037(1122)/asrlog-2017-11-21-17-06-29/1/android",
0)
event.Skip()
# ----------------------------------------------------------------------
def __init__(self, parent):
"""Constructor"""
wx.Panel.__init__(self, parent)
self._STATE_IMAGE_DICT_ = {
Task.__STATE_WAITING__: image.task_waiting.getBitmap(),
Task.__STATE_PROCESSING__: image.task_process.getBitmap(),
Task.__STATE_PAUSED__: image.task_paused.getBitmap(),
Task.__STATE_GENERATING__: image.task_generating.getBitmap()
}
try:
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldfont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
except AttributeError:
# wxPython 4 / Phoenix updated SystemSettings
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldfont = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.il = ULC.PyImageList(20, 20)
self.il.Add(self._STATE_IMAGE_DICT_[Task.__STATE_WAITING__]) # 0
self.il.Add(self._STATE_IMAGE_DICT_[Task.__STATE_PROCESSING__]) # 1
self.il.Add(self._STATE_IMAGE_DICT_[Task.__STATE_PAUSED__]) # 2
self.il.Add(self._STATE_IMAGE_DICT_[Task.__STATE_GENERATING__]) # 3
self.ulc = MyUlc(self, agwStyle=wx.LC_REPORT | wx.LC_VRULES | wx.LC_HRULES)
self.ulc.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.Bind(ULC.EVT_LIST_ITEM_HYPERLINK, self.OnHyperTextClicked, self.ulc)
self.Bind(ULC.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.ulc)
        # Reference:
        # http://xoomer.virgilio.it/infinity77/Phoenix/lib.agw.ultimatelistctrl.UltimateListItem.html#lib.agw.ultimatelistctrl.UltimateListItem
        # Configure the style of the first column:
        # create a ULC list item
info = ULC.UltimateListItem()
        # The mask controls which attribute kinds the item carries
info._mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT | ULC.ULC_MASK_PYDATA
info._format = ULC.ULC_FORMAT_LEFT
info._text = LANG.task_log_path
self.ulc.InsertColumnInfo(0, info)
info = ULC.UltimateListItem()
info._format = ULC.ULC_FORMAT_LEFT
info._mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT | ULC.ULC_MASK_HYPERTEXT
info._text = LANG.task_status
self.ulc.InsertColumnInfo(1, info)
self.ulc.SetColumnWidth(0, 600)
self.ulc.SetColumnWidth(1, 200)
# self.ulc.InsertImageStringItem(0, "yyyyyy", 0)
# item = self.ulc.GetItem(5, 1)
# item.SetHyperText(True)
# s = "https://www.google.com.hk"
# item.SetPyData(s)
# self.ulc.SetItem(item)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.ulc, 1, flag=wx.EXPAND)
self.SetSizer(sizer)
# ------------------------------------------------------------------------------------------
class UlcTaskDoneList(wx.Panel):
def AddTask(self, task):
self.ulc.InsertStringItem(0, task.finish_time)
        # light green
colour = wx.Colour(144, 238, 144)
if task.state == Task.__STATE_FAILED__:
            # orange-red
colour = wx.Colour(255, 69, 0)
time_col = self.ulc.GetItem(0, 0)
time_col.SetBackgroundColour(colour)
self.ulc.SetItem(time_col)
log_col = self.ulc.GetItem(0, 1)
log_col.SetText(task.log_path)
log_col.SetBackgroundColour(colour)
self.ulc.SetItem(log_col)
if task.state == Task.__STATE_DONE__:
hyper_scan_col = self.ulc.GetItem(0, 2)
hyper_scan_col.SetHyperText()
hyper_scan_col.SetText("浏览")
hyper_scan_col.SetBackgroundColour(colour)
hyper_scan_col.SetPyData(task)
self.ulc.SetItem(hyper_scan_col)
# hyper_open_col = self.ulc.GetItem(0, 3)
# hyper_open_col.SetHyperText()
# hyper_open_col.SetText("打开")
# hyper_open_col.SetBackgroundColour(colour)
# hyper_open_col.SetPyData(task)
# self.ulc.SetItem(hyper_open_col)
if task.state == Task.__STATE_FAILED__:
hyper_scan_col = self.ulc.GetItem(0, 2)
hyper_scan_col.SetText("无法解析文件")
hyper_scan_col.SetBackgroundColour(colour)
self.ulc.SetItem(hyper_scan_col)
# log_col = ULC.UltimateListItem()
# log_col.SetColumn(1)
# log_col.SetBackgroundColour()
# log_col.SetHyperText()
self.ulc.SortItems()
self.ulc.Refresh()
def RemoveTask(self, task):
# task.log_path
# self.ulc.InsertImageStringItem(0, "haha", 0)
# task state_
pass
def OnItemSelected(self, event):
self.currentIndex = event.m_itemIndex
print "OnItemSelected: %s, %s\n" % (self.currentIndex, self.ulc.GetItemText(self.currentIndex))
if self.ulc.GetPyData(self.currentIndex):
print ("PYDATA = %s\n" % repr(self.ulc.GetPyData(self.currentIndex)))
# event.Skip()
def OnHyperTextClicked(self, event):
# event see CommandListEvent in ULC
        # Known issue: when the same column holds two hyperlinks, there is no
        # information to tell which one was clicked.
        print "You clicked a hypertext item"
self.currentIndex = event.m_itemIndex
hyper_item = self.ulc.GetItem(event.m_itemIndex, 2)
if hyper_item.GetPyData():
print ("PYDATA = %s\n" % repr(hyper_item.GetPyData()))
tool.open_browser(hyper_item.GetPyData().result_path)
event.Skip()
# ----------------------------------------------------------------------
def __init__(self, parent):
"""Constructor"""
wx.Panel.__init__(self, parent)
try:
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldfont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
except AttributeError:
# wxPython 4 / Phoenix updated SystemSettings
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldfont = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.il = ULC.PyImageList(20, 20)
self.il.Add(image.task_done.getBitmap())
self.ulc = ULC.UltimateListCtrl(self, agwStyle=wx.LC_REPORT | wx.LC_VRULES | wx.LC_HRULES)
self.ulc.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.Bind(ULC.EVT_LIST_ITEM_HYPERLINK, self.OnHyperTextClicked, self.ulc)
# self.Bind(ULC.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.ulc)
        # Reference:
        # http://xoomer.virgilio.it/infinity77/Phoenix/lib.agw.ultimatelistctrl.UltimateListItem.html#lib.agw.ultimatelistctrl.UltimateListItem
        # Configure the style of the first column:
        # create a ULC list item
title = ULC.UltimateListItem()
        # The mask controls which attribute kinds the item carries
title._mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT \
| ULC.ULC_MASK_BACKCOLOUR \
| ULC.ULC_MASK_FONTCOLOUR
title._format = ULC.ULC_FORMAT_LEFT
title._text = LANG.finished_time
# title._colour = wx.Colour(199, 21, 133)
# title.SetBackgroundColour(wx.Colour(199,21,133))
self.ulc.InsertColumnInfo(0, title)
# self.ulc.InsertItem(title)
log_path_item = ULC.UltimateListItem()
        # The mask controls which attribute kinds the item carries
log_path_item._mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT \
| ULC.ULC_MASK_BACKCOLOUR \
| ULC.ULC_MASK_FONTCOLOUR \
| ULC.ULC_MASK_PYDATA
log_path_item._format = ULC.ULC_FORMAT_LEFT
log_path_item._text = LANG.log_path
self.ulc.InsertColumnInfo(1, log_path_item)
op = ULC.UltimateListItem()
op._format = ULC.ULC_FORMAT_LEFT
op._mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT \
| ULC.ULC_MASK_HYPERTEXT \
| ULC.ULC_MASK_BACKCOLOUR \
| ULC.ULC_MASK_FONTCOLOUR \
| ULC.ULC_MASK_PYDATA
self.ulc.InsertColumnInfo(2, op)
# op = ULC.UltimateListItem()
# op._format = ULC.ULC_FORMAT_LEFT
# op._mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT \
# | ULC.ULC_MASK_HYPERTEXT \
# | ULC.ULC_MASK_BACKCOLOUR \
# | ULC.ULC_MASK_FONTCOLOUR
# self.ulc.InsertColumnInfo(3, op)
self.ulc.SetColumnWidth(0, 150)
self.ulc.SetColumnWidth(1, 550)
self.ulc.SetColumnWidth(2, 100)
#self.ulc.SetColumnWidth(3, 50)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.ulc, 1, flag=wx.EXPAND)
self.SetSizer(sizer)
# -------------------------------------------------------------------------------------------
class AppToolBar(wx.ToolBar):
__TBFLAGS__ = (wx.TB_HORIZONTAL
| wx.NO_BORDER
| wx.TB_FLAT
# | wx.TB_TEXT
# | wx.TB_HORZ_LAYOUT
)
TOOL_NEW = 10
TOOL_CLEAN = 20
TOOL_HELP = 30
def __init__(self, parent):
wx.ToolBar.__init__(self, parent, style=self.__TBFLAGS__)
tsize = (24, 24)
bm_help = wx.ArtProvider.GetBitmap(wx.ART_HELP, wx.ART_TOOLBAR, tsize)
self.SetToolBitmapSize(tsize)
bm_new = image.action_new.GetBitmap()
self.AddLabelTool(self.TOOL_NEW, "New", bm_new, shortHelp="新增任务", longHelp="在任务列表中新增任务")
self.AddSeparator()
bm_history = image.action_clean_history.GetBitmap()
self.AddLabelTool(self.TOOL_CLEAN, "Clean", bm_history, shortHelp="删除历史", longHelp="删除列表中的历史记录")
self.AddSeparator()
self.AddLabelTool(self.TOOL_HELP, "Help", bm_help, shortHelp="帮助", longHelp="帮助文档")
self.AddSeparator()
def SetOnToolClicked(self, callback):
self.OnToolClick = callback
self.Bind(wx.EVT_TOOL, self.OnToolClick, id=self.TOOL_NEW)
self.Bind(wx.EVT_TOOL, self.OnToolClick, id=self.TOOL_CLEAN)
self.Bind(wx.EVT_TOOL, self.OnToolClick, id=self.TOOL_HELP)
# ---------------------------------------------------------------------------------------------
class AppNewTaskDialog(wx.Dialog):
LogPath = ''
SrcPath = ''
LogChooseType = ''
__LOG_TEXT_ID = 0x001
__LOG_BTN_ID = 0x002
__LOG_CHOOSE_ID = 0x003
__SRC_TEXT_ID = 0x010
__SRC__BTN_ID = 0x020
__WILDCARD = "All files (*.*)|*.*"
__LOG_FILE_TYPE = ['file', 'dir']
__STYLE_FILE = __LOG_FILE_TYPE[0]
__STYLE_DIR = __LOG_FILE_TYPE[1]
def __init__(
self, parent, ID, title, size=wx.DefaultSize, pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE,
useMetal=False,
):
self.LogPath = ''
self.SrcPath = ''
# Instead of calling wx.Dialog.__init__ we precreate the dialog
# so we can set an extra style that must be set before
# creation, and then we create the GUI object using the Create
# method.
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, ID, title, pos, size, style)
# This next step is the most important, it turns this Python
# object into the real wrapper of the dialog (instead of pre)
# as far as the wxPython extension is concerned.
self.PostCreate(pre)
# This extra style can be set after the UI object has been created.
if 'wxMac' in wx.PlatformInfo and useMetal:
self.SetExtraStyle(wx.DIALOG_EX_METAL)
# Now continue with the normal construction of the dialog
# contents
sizer = wx.BoxSizer(wx.VERTICAL)
# --------------------------------------------------------
# log
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Log:")
label.SetHelpText("Please select the log file/directory.")
box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.LogText = text = wx.TextCtrl(self, self.__LOG_TEXT_ID, "", size=(300, -1), style=wx.TE_READONLY)
self.LogText.Bind(wx.EVT_TEXT, self.__OnLogTextChanged)
text.SetHelpText("Please select the log file/directory.")
box.Add(text, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
ch = wx.Choice(self, self.__LOG_CHOOSE_ID, (100, 50), choices=self.__LOG_FILE_TYPE)
ch.Bind(wx.EVT_CHOICE, self.__LogTypeChoosed, ch)
self.LogChooseType = ch.GetStringSelection()
box.Add(ch, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
bsize = (16, 16)
open = wx.ArtProvider.GetBitmap(wx.ART_FOLDER_OPEN, wx.ART_TOOLBAR, bsize)
btn = wx.BitmapButton(self, self.__LOG_BTN_ID, open, size=(40, -1))
btn.SetHelpText("Please select the log file/directory.")
btn.Bind(wx.EVT_BUTTON, self.__OnButtonClicked, id=self.__LOG_BTN_ID)
box.Add(btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
# error tip
self.logErrorTip = wx.StaticText(self, -1, "Log path must be set")
self.logErrorTip.SetForegroundColour((0xff, 0, 0))
sizer.Add(self.logErrorTip, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
# --------------------------------------------------------
# src
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Src:")
label.SetHelpText("Please select the project root directory.")
box.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.SrcText = text = wx.TextCtrl(self, self.__SRC_TEXT_ID, "", size=(300, -1), style=wx.TE_READONLY)
text.SetHelpText("Please select the project root directory.")
box.Add(text, 1, wx.ALIGN_CENTRE | wx.ALL, 5)
bsize = (16, 16)
open = wx.ArtProvider.GetBitmap(wx.ART_FOLDER_OPEN, wx.ART_TOOLBAR, bsize)
btn = wx.BitmapButton(self, self.__SRC__BTN_ID, open, size=(40, -1))
btn.SetHelpText("Please select the log file/directory.")
btn.Bind(wx.EVT_BUTTON, self.__OnButtonClicked, id=self.__SRC__BTN_ID)
box.Add(btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
sizer.Add(box, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
# --------------------------------------------------------
# line
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP, 5)
# --------------------------------------------------------
# cancel and ok button
btnsizer = wx.StdDialogButtonSizer()
# if wx.Platform != "__WXMSW__":
# btn = wx.ContextHelpButton(self)
# btnsizer.AddButton(btn)
btn = wx.Button(self, wx.ID_OK, "新增")
btn.Bind(wx.EVT_BUTTON, self.__OnConfirmClicked)
# btn.SetHelpText("The OK button completes the dialog")
btn.SetDefault()
btnsizer.AddButton(btn)
btn = wx.Button(self, wx.ID_CANCEL, "取消")
# btn.SetHelpText("The Cancel button cancels the dialog. (Cool, huh?)")
btnsizer.AddButton(btn)
btnsizer.Realize()
sizer.Add(btnsizer, 0, wx.ALIGN_CENTER | wx.ALL, 5)
self.SetSizer(sizer)
sizer.Fit(self)
def __OnLogTextChanged(self, env):
if env.GetString():
self.logErrorTip.Hide()
else:
self.logErrorTip.Show()
def __OnConfirmClicked(self, env):
print "clicked"
if not self.LogPath or self.LogPath == '':
pass
else:
env.Skip()
def __OnButtonClicked(self, env):
print "__OnButtonClicked ", env.GetId()
id = env.GetId()
if id == self.__LOG_BTN_ID:
self.LogPath = self.__ChooseFileOrDir("Choose Log Files/Directory", self.LogChooseType)
self.LogText.SetValue(self.LogPath)
elif id == self.__SRC__BTN_ID:
self.SrcPath = self.__ChooseFileOrDir("Choose Src Directory", self.__STYLE_DIR)
self.SrcText.SetValue(self.SrcPath)
def __LogTypeChoosed(self, env):
print "__LogTypeChoose ", env.GetId()
id = env.GetId()
if id == self.__LOG_CHOOSE_ID:
self.LogChooseType = env.GetString()
print env.GetString()
def __ChooseFileOrDir(self, message, style):
path = ''
if style == self.__STYLE_DIR:
# In this case we include a "New directory" button.
dlg = wx.DirDialog(self, message,
style=wx.DD_DEFAULT_STYLE
# | wx.DD_DIR_MUST_EXIST
# | wx.DD_CHANGE_DIR
)
# If the user selects OK, then we process the dialog's data.
# This is done by getting the path data from the dialog - BEFORE
# we destroy it.
if dlg.ShowModal() == wx.ID_OK:
print 'You selected: %s\n' % dlg.GetPath()
path = dlg.GetPath()
# Only destroy a dialog after you're done with it.
dlg.Destroy()
elif style == self.__STYLE_FILE:
            # Create the dialog. In this case the current directory is forced as the starting
            # directory for the dialog, and no default file name is forced. This can easily
            # be changed in your program. This is an 'open' dialog, and allows multiple
            # file selections as well.
#
# Finally, if the directory is changed in the process of getting files, this
# dialog is set up to change the current working directory to the path chosen.
dlg = wx.FileDialog(
self, message=message,
defaultDir=tool.getHomePath(),
defaultFile="",
wildcard=self.__WILDCARD,
style=wx.OPEN | wx.CHANGE_DIR # | wx.MULTIPLE
)
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
print 'You selected: %s\n' % dlg.GetPath()
path = dlg.GetPath()
# Destroy the dialog. Don't do this until you are done with it!
# BAD things can happen otherwise!
dlg.Destroy()
return path
# ---------------------------------------------------------------------------------------------
# Main frame
# Pengtian
class MainWindow(wx.Frame, UIActionInterface):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, LANG.app_name, size=(800, 500))
self.Centre()
self.SetMinSize((800, 500))
self.SetMaxSize((800, 500))
self.mainWindow()
self.statusbar()
self.toolbar()
self.presenter = Presenter(self)
# PRESENTER.setUI(self)
    # Main content window
def mainWindow(self):
self.mWindow = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
self.ulcTaskPanel = UlcTaskList(self.mWindow) # wx.Panel(self.finished_sp, -1, style=wx.SUNKEN_BORDER)
self.ulcTaskDonePanel = UlcTaskDoneList(self.mWindow) # wx.Panel(self.finished_sp, -1, style=wx.SUNKEN_BORDER)
self.mWindow.SplitHorizontally(self.ulcTaskPanel, self.ulcTaskDonePanel, 200)
self.mWindow.SetMinimumPaneSize(30)
    # Status bar
def statusbar(self):
self.sb = AppStatusBar(self)
self.SetStatusBar(self.sb)
    # Toolbar
def toolbar(self):
self.tb = AppToolBar(self)
self.SetToolBar(self.tb)
self.tb.SetOnToolClicked(self.__OnToolBarItemClick)
# interface for presenter
# --------------------------------------------------
def AddTaskToProcessPanel(self, task):
self.ulcTaskPanel.AddTask(task)
def AddTaskFailed(self, task, msg):
print task, msg
def UpdateTaskProgress(self, task, progress):
self.ulcTaskPanel.UpdateTaskProgress(task, progress)
def UpdateTaskInProcessPanel(self, task):
self.ulcTaskPanel.UpdateTaskState(task)
def RemoveTaskFromProcessing(self, task):
self.ulcTaskPanel.RemoveTask(task)
def AddTaskToDone(self, task):
self.ulcTaskDonePanel.AddTask(task)
def RemoveTaskFromDone(self, task):
self.ulcTaskDonePanel.RemoveTask(task)
# --------------------------------------------------
    # Toolbar click handling
def __OnToolBarItemClick(self, evt):
print "__OnToolBarItemClick ", evt.GetId()
id = evt.GetId()
if id == AppToolBar.TOOL_NEW:
# step1
# if self.ulcTaskPanel.ulc.GetItemCount() <= 0:
# task = Task("yyyyyy", "srcPath", state=Task.__STATE_WAITING__)
# else:
# task = Task("yyyyyy" + time.time(), "srcPath", state=Task.__STATE_WAITING__)
# self.AddTaskToProcessPanel(task)
# # step2
# task.state = Task.__STATE_PROCESSING__
# self.UpdateTaskInProcessPanel(task)
#
# # step3
# self.UpdateTaskProgress(task, 20)
self.__DoNew()
elif id == AppToolBar.TOOL_CLEAN:
# task = Task("yyyyyy", "srcPath", state=Task.__STATE_WAITING__)
# self.RemoveTaskFromProcessing(task)
self.__DoClean()
elif id == AppToolBar.TOOL_HELP:
self.__ShowHelp()
def __DoNew(self):
self.__ShowNewDialog()
def __ShowNewDialog(self):
dlg = AppNewTaskDialog(self, -1, "Add new task", size=(500, 200),
# style=wx.CAPTION | wx.SYSTEM_MENU | wx.THICK_FRAME,
style=wx.DEFAULT_DIALOG_STYLE, # & ~wx.CLOSE_BOX,
)
dlg.CenterOnScreen()
val = dlg.ShowModal()
if val == wx.ID_OK:
print "dlg.LogPath %s" % dlg.LogPath
print "dlg.SrcPath %s" % dlg.SrcPath
# TODO presenter
self.presenter.create_task(dlg.LogPath, dlg.SrcPath)
print "You pressed OK\n"
else:
print "You pressed Cancel\n"
dlg.Destroy()
def __DoClean(self):
pass
def __ShowCleanDialog(self):
pass
def __ShowHelp(self):
pass
# ---------------------------------------------------------------------------
if __name__ == "__main__":
print 'current pid is %s' % os.getpid()
main()
|
OptimalThresholdSVM.py
|
# This script computes the best SVM threshold value that maximises the F1 score.
# Please set the validation_set_path variable to the current location of the validation samples before running the script.
import numpy as np
import glob, os
import matplotlib.pyplot as plt
from math import copysign
from sklearn.metrics import precision_recall_fscore_support
from multiprocessing import Process, Manager
import math
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
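# Illustrative example (not part of the original script): chunkIt splits a
# sequence into `num` roughly equal consecutive slices, e.g.
#   chunkIt(range(10), 3)  ->  [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]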
validation_set_path = "./structures/"
c = len(validation_set_path)
files = glob.glob(validation_set_path + "*_ab.pdb")
all_scores = []
all_truth = []
thresholds = []
pdb_ids = []
manager = Manager()
for f in sorted(files) :
interface_residues = set()
ab_filename = f
ab_id = os.path.basename(f)[:-7]
ab_patch_score = "%s%s_ab_patch_score.txt" % (validation_set_path, ab_id)
ab_patch_truth = "%s%s_ab_patch_truth.txt" % (validation_set_path, ab_id)
with open(ab_patch_score) as pred, open(ab_patch_truth) as truth :
patch_pred = [float(x) for x in pred.readlines()]
patch_truth = [int(x) for x in truth.readlines()]
pdb_ids.append(ab_id)
all_scores.append(patch_pred)
all_truth.append(patch_truth)
all_thresholds = np.unique(np.concatenate(all_scores))
print len(all_thresholds)
R = 20
pad_size = math.ceil(float(all_thresholds.size)/R)*R - all_thresholds.size
all_thresholds = np.append(all_thresholds, np.zeros(int(pad_size))*np.NaN)
all_thresholds = np.nanmean(all_thresholds.reshape(-1, R), axis=1)
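# The two lines above coarsen the threshold grid by a factor of R: the unique
# scores are padded with NaN up to a multiple of R, reshaped into rows of length
# R, and each row is averaged with NaNs ignored. For example (hypothetical
# numbers), 1010 unique thresholds with R = 20 get 10 NaNs appended and are
# reduced to 51 averaged thresholds.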
print len(all_thresholds)
precision = manager.list([0 for _ in all_thresholds])
recall = manager.list([0 for _ in all_thresholds])
def compute(indices, all_truth ,all_scores, pdb_ids, all_thresholds, precision, recall):
print all_thresholds[indices[0]]
for t in indices :
for i in xrange(len(pdb_ids)) :
p, r, _, _ = precision_recall_fscore_support(all_truth[i], [copysign(1, x - all_thresholds[t]) for x in all_scores[i]], average='binary')
precision[t] += p
recall[t] += r
precision[t] /= len(pdb_ids)
recall[t] /= len(pdb_ids)
# Parallel(n_jobs=12)(delayed(compute)(t, all_truth ,all_scores, pdb_ids, all_thresholds, precision, recall) for t in xrange(len(all_thresholds)))
L = chunkIt(range(len(all_thresholds)), 100)
job = [Process(target=compute, args=(indices, all_truth ,all_scores, pdb_ids, all_thresholds, precision, recall)) for indices in L]
_ = [p.start() for p in job]
_ = [p.join() for p in job]
thresholds_f1scores = [(all_thresholds[t], 2 * precision[t] * recall[t] / (precision[t] + recall[t])) for t in xrange(len(all_thresholds))]
best_pair = max(thresholds_f1scores, key=lambda x:x[1])
print ("Maximum F1 obtained for threshold: %s" % str(best_pair))
plt.figure(2, figsize=(10, 10), dpi=1200)
plt.xlim([all_thresholds[0], all_thresholds[-1]])
plt.ylim([0.0, 1.05])
plt.xlabel('Threshold values')
plt.ylabel('F1 score')
plt.title('Threshold versus F1 scores')
plt.plot(all_thresholds, [a[1] for a in thresholds_f1scores], color='navy', linestyle='solid', linewidth=2)
plt.scatter(best_pair[0], best_pair[1], marker='x', color='red', s=40)
plt.plot([best_pair[0], best_pair[0]], [0, best_pair[1]], linestyle="dotted", linewidth=1, color='red')
plt.plot([all_thresholds[0], best_pair[0]], [best_pair[1], best_pair[1]], linestyle="dotted", linewidth=1, color='red')
plt.annotate("(%.4f, %.4f)" % (best_pair[0], best_pair[1]), xy=(best_pair[0], best_pair[1]), xytext=(-140, 30),
textcoords='offset points', arrowprops=dict(arrowstyle="->", connectionstyle="angle,angleA=0,angleB=90,rad=10"))
# plt.legend()
plt.savefig("threshold_for_best_F1_score.pdf", dpi=1200, bbox_inches='tight')
plt.close(2)
plt.clf()
|
test_sockets.py
|
from __future__ import print_function
import os, multiprocessing, subprocess, socket, time
from runner import BrowserCore, path_from_root
from tools.shared import *
node_ws_module_installed = False
try:
NPM = os.path.join(os.path.dirname(NODE_JS[0]), 'npm.cmd' if WINDOWS else 'npm')
except:
pass
def clean_processes(processes):
import signal, errno
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode == None) and (not hasattr(p, 'returncode') or p.returncode == None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except:
pass
time.sleep(1)
# send a forcible kill immediately afterwards. If the process did not die before, this should clean it.
try:
p.kill() # SIGKILL
except:
pass
def make_relay_server(port1, port2):
print('creating relay server on ports %d,%d' % (port1, port2), file=sys.stderr)
proc = Popen([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
return proc
class WebsockifyServerHarness(object):
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port-1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
import socket, websockify
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
sp = Popen([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + get_clang_native_args() + self.args, env=get_clang_native_env(), stdout=PIPE, stderr=PIPE)
out = sp.communicate()
print('Socket server build: out:', out[0] or '', '/ err:', out[1] or '')
assert sp.returncode == 0
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
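# Illustrative sketch (not part of the original tests): the harnesses above are
# context managers, so a browser test typically wraps btest() in a `with` block.
# The port number below is hypothetical; see the port allocation notes further down.
#   harness = WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [], 49159)
#   with harness:
#     self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0',
#                args=['-DSOCKK=%d' % harness.listen_port])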
class CompiledServerHarness(object):
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
child = Popen(NODE_JS + ['-e', 'require("ws");'])
child.communicate()
global node_ws_module_installed
# Attempt to automatically install ws module for Node.js.
if child.returncode != 0 and not node_ws_module_installed:
node_ws_module_installed = True
Popen([NPM, 'install', path_from_root('tests', 'sockets', 'ws')], cwd=os.path.dirname(EMCC)).communicate()
# Did installation succeed?
child = Popen(NODE_JS + ['-e', 'require("ws");'])
child.communicate()
assert child.returncode == 0, 'ws module for Node.js not installed, and automatic installation failed! Please run \'npm install\' from %s' % EMSCRIPTEN_ROOT
# compile the server
sp = Popen([PYTHON, EMCC, path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
out = sp.communicate()
print('Socket server build: out:', out[0] or '', '/ err:', out[1] or '')
assert sp.returncode == 0
process = Popen(NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(self):
super(sockets, self).setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
def test_inet(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
in_addr_t i = inet_addr("190.180.10.78");
printf("%x\n", i);
return 0;
}
'''
self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')
def test_inet2(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntoa(x));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntoa(x2));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet3(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
int main() {
char dst[64];
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntop(AF_INET,&x,dst,sizeof dst));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntop(AF_INET,&x2,dst,sizeof dst));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet4(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
void test(const char *test_addr, bool first=true){
char str[40];
struct in6_addr addr;
unsigned char *p = (unsigned char*)&addr;
int ret;
ret = inet_pton(AF_INET6,test_addr,&addr);
if(ret == -1) return;
if(ret == 0) return;
if(inet_ntop(AF_INET6,&addr,str,sizeof(str)) == NULL ) return;
printf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x - %s\n",
p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15],str);
if (first) test(str, false); // check again, on our output
}
int main(){
test("::");
test("::1");
test("::1.2.3.4");
test("::17.18.19.20");
test("::ffff:1.2.3.4");
test("1::ffff");
test("::255.255.255.255");
test("0:ff00:1::");
test("0:ff::");
test("abcd::");
test("ffff::a");
test("ffff::a:b");
test("ffff::a:b:c");
test("ffff::a:b:c:d");
test("ffff::a:b:c:d:e");
test("::1:2:0:0:0");
test("0:0:1:2:3::");
test("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
test("1::255.255.255.255");
//below should fail and not produce results..
test("1.2.3.4");
test("");
test("-");
printf("ok.\n");
}
'''
self.do_run(src, r'''0000:0000:0000:0000:0000:0000:0000:0000 - ::
0000:0000:0000:0000:0000:0000:0000:0000 - ::
0000:0000:0000:0000:0000:0000:0000:0001 - ::1
0000:0000:0000:0000:0000:0000:0000:0001 - ::1
0000:0000:0000:0000:0000:0000:0102:0304 - ::102:304
0000:0000:0000:0000:0000:0000:0102:0304 - ::102:304
0000:0000:0000:0000:0000:0000:1112:1314 - ::1112:1314
0000:0000:0000:0000:0000:0000:1112:1314 - ::1112:1314
0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4
0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4
0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff
0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff
0000:0000:0000:0000:0000:0000:ffff:ffff - ::ffff:ffff
0000:0000:0000:0000:0000:0000:ffff:ffff - ::ffff:ffff
0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::
0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::
0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::
0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::
abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::
abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::
ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a
ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a
ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b
ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b
ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c
ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c
ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d
ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d
ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e
ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e
0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0
0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0
0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::
0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff
0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff
ok.
''')
def test_getsockname_unconnected_socket(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
int main() {
int fd;
int z;
fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in adr_inet;
socklen_t len_inet = sizeof adr_inet;
z = getsockname(fd, (struct sockaddr *)&adr_inet, &len_inet);
if (z != 0) {
perror("getsockname error");
return 1;
}
char buffer[1000];
sprintf(buffer, "%s:%u", inet_ntoa(adr_inet.sin_addr), (unsigned)ntohs(adr_inet.sin_port));
const char *correct = "0.0.0.0:0";
printf("got (expected) socket: %s (%s), size %d (%d)\n", buffer, correct, strlen(buffer), strlen(correct));
assert(strlen(buffer) == strlen(correct));
assert(strcmp(buffer, correct) == 0);
puts("success.");
}
''', 'success.')
def test_getpeername_unconnected_socket(self):
self.do_run(r'''
#include <sys/socket.h>
#include <stdio.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
int main() {
int fd;
int z;
fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in adr_inet;
socklen_t len_inet = sizeof adr_inet;
z = getpeername(fd, (struct sockaddr *)&adr_inet, &len_inet);
if (z != 0) {
perror("getpeername error");
return 1;
}
puts("unexpected success.");
}
''', 'getpeername error: Socket not connected')
def test_getaddrinfo(self):
self.emcc_args=[]
self.do_run(open(path_from_root('tests', 'sockets', 'test_getaddrinfo.c')).read(), 'success')
def test_getnameinfo(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getnameinfo.c')).read(), 'success')
def test_gethostbyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_gethostbyname.c')).read(), 'success')
def test_getprotobyname(self):
self.do_run(open(path_from_root('tests', 'sockets', 'test_getprotobyname.c')).read(), 'success')
def test_link(self):
self.emcc_args += ['-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1']
self.do_run(r'''
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
int main () {
void* thing = gethostbyname("bing.com");
ssize_t rval = recv (0, thing, 0, 0);
rval = send (0, thing, 0, 0);
return 0;
}''', '', force_c=True)
def test_sockets_echo(self):
sockets_include = '-I'+path_from_root('tests', 'sockets')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [ (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0) ]
for harness, datagram in harnesses:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2'], 49164)
with harness:
self.btest('sdl2_net_client.c', expected='0', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
# Run with ./runner.py sockets.test_sockets_async_echo
sockets_include = '-I'+path_from_root('tests', 'sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [ (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0) ]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I'+path_from_root('tests', 'sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256*256*2):
message += str(unichr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
input = open(input_filename).read()
output = input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [ (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49171), 0) ]
for harness, datagram in harnesses:
with harness:
self.btest(output, expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], force_c=True)
def test_sockets_partial(self):
if WINDOWS: return self.skip('This test is Unix-specific.')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'), expected='165', args=['-DSOCKK=%d' % harness.listen_port])
def test_sockets_select_server_down(self):
if WINDOWS: return self.skip('This test is Unix-specific.')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
def test_sockets_select_server_closes_connection_rw(self):
if WINDOWS: return self.skip('This test is Unix-specific.')
sockets_include = '-I'+path_from_root('tests', 'sockets')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
def test_enet(self):
if WINDOWS: return self.skip('This test uses Unix-specific build architecture.')
# this is also a good test of raw usage of emconfigure and emmake
try_delete(self.in_dir('enet'))
shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
pwd = os.getcwd()
os.chdir(self.in_dir('enet'))
Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
os.chdir(pwd)
for harness in [
CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
  # This test is no longer in use for WebSockets as we can't truly emulate
  # a server in the browser (in the past, there were some hacks to make it
  # somewhat work, but those have been removed). However, with WebRTC it
  # should be possible to resurrect this test.
# def test_enet_in_browser(self):
# try_delete(self.in_dir('enet'))
# shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
# pwd = os.getcwd()
# os.chdir(self.in_dir('enet'))
# Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
# Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
# enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
# os.chdir(pwd)
# Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet).communicate()
# with WebsockifyServerHarness('', [], 2235, 2234):
# with WebsockifyServerHarness('', [], 2237, 2236):
# pids = []
# try:
# proc = make_relay_server(2234, 2236)
# pids.append(proc.pid)
# self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
# finally:
# clean_pids(pids);
def test_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
return self.skip('WebRTC support is not up to date.')
host_src = 'webrtc_host.c'
peer_src = 'webrtc_peer.c'
host_outfile = 'host.html'
peer_outfile = 'peer.html'
host_filepath = path_from_root('tests', 'sockets', host_src)
temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
with open(host_filepath) as f: host_src = f.read()
with open(temp_host_filepath, 'w') as f: f.write(self.with_report_result(host_src))
peer_filepath = path_from_root('tests', 'sockets', peer_src)
temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
with open(peer_filepath) as f: peer_src = f.read()
with open(temp_peer_filepath, 'w') as f: f.write(self.with_report_result(peer_src))
open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: undefined,
onpeer: function(peer, route) {
window.open('http://localhost:8888/peer.html?' + route);
// iframe = document.createElement("IFRAME");
// iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
// iframe.style.display = "none";
// document.body.appendChild(iframe);
peer.listen();
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
},
onerror: function(error) {
console.error(error);
}
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: window.location.toString().split('?')[1],
onpeer: function(peer, route) {
peer.connect(Module['webrtc']['session']);
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
// Calling window.close() from this handler hangs my browser, so run it in the next turn
setTimeout(window.close, 0);
},
onerror: function(error) {
console.error(error);
},
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
Popen([PYTHON, EMCC, temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
Popen([PYTHON, EMCC, temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
# note: you may need to run this manually yourself, if npm is not in the path, or if you need a version that is not in the path
Popen([NPM, 'install', path_from_root('tests', 'sockets', 'p2p')]).communicate()
broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])
expected = '1'
self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])
broker.kill();
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
# Run with ./runner.py sockets.test_nodejs_sockets_echo
if NODE_JS not in JS_ENGINES:
return self.skip('node is not present')
sockets_include = '-I'+path_from_root('tests', 'sockets')
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [ (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0) ]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE).communicate()
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
      # server because as long as the subprotocol list contains binary it will configure itself to accept binary data.
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
]:
with harness:
Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE).communicate()
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
]:
with harness:
open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE).communicate()
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
|
api.py
|
"""
API
======
"""
import zipfile
try:
import queue
except ImportError:
import Queue as queue
import threading
import datetime
import math
import requests
import mimetypes
import os
try:
from urllib.parse import urlunparse, urlencode, urlparse, parse_qs
except ImportError:
from urllib import urlencode
from urlparse import urlunparse, urlparse, parse_qs
try:
import simplejson as json
except ImportError:
import json
import time
from urllib3.exceptions import ReadTimeoutError
from pyodm.types import NodeOption, NodeInfo, TaskInfo, TaskStatus
from pyodm.exceptions import NodeConnectionError, NodeResponseError, NodeServerError, TaskFailedError, OdmError, RangeNotAvailableError
from pyodm.utils import MultipartEncoder, options_to_json, AtomicCounter
from requests_toolbelt.multipart import encoder
class Node:
"""A client to interact with NodeODM API.
Args:
host (str): Hostname or IP address of processing node
port (int): Port of processing node
token (str): token to use for authentication
timeout (int): timeout value in seconds for network requests
"""
def __init__(self, host, port, token="", timeout=30):
self.host = host
self.port = port
self.token = token
self.timeout = timeout
@staticmethod
def from_url(url, timeout=30):
"""Create a Node instance from a URL.
>>> n = Node.from_url("http://localhost:3000?token=abc")
Args:
url (str): URL in the format proto://hostname:port/?token=value
timeout (int): timeout value in seconds for network requests
Returns:
:func:`~Node`
"""
u = urlparse(url)
qs = parse_qs(u.query)
port = u.port
if port is None:
port = 443 if u.scheme == 'https' else 80
token = ""
if 'token' in qs:
token = qs['token'][0]
return Node(u.hostname, port, token, timeout)
@staticmethod
def compare_version(node_version, compare_version):
# Compare two NodeODM versions
# -1 = node version lower than compare
# 0 = equal
# 1 = node version higher than compare
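        # e.g. (illustrative): compare_version("1.5.3", "1.4.0") returns 1, because
        # 1*1000000 + 5*1000 + 3 = 1005003 > 1*1000000 + 4*1000 + 0 = 1004000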
if node_version is None or len(node_version) < 3:
return -1
if node_version == compare_version:
return 0
try:
(n_major, n_minor, n_build) = map(int, node_version.split("."))
(c_major, c_minor, c_build) = map(int, compare_version.split("."))
        except Exception:
return -1
n_number = 1000000 * n_major + 1000 * n_minor + n_build
c_number = 1000000 * c_major + 1000 * c_minor + c_build
if n_number < c_number:
return -1
else:
return 1
def url(self, url, query={}):
"""Get a URL relative to this node.
Args:
url (str): relative URL
query (dict): query values to append to the URL
Returns:
str: Absolute URL
"""
netloc = self.host if (self.port == 80 or self.port == 443) else "{}:{}".format(self.host, self.port)
proto = 'https' if self.port == 443 else 'http'
        query = dict(query)  # copy so the shared default dict is never mutated
        if len(self.token) > 0:
            query['token'] = self.token
        return urlunparse((proto, netloc, url, '', urlencode(query), ''))
def get(self, url, query={}, **kwargs):
try:
res = requests.get(self.url(url, query), timeout=self.timeout, **kwargs)
if res.status_code == 401:
raise NodeResponseError("Unauthorized. Do you need to set a token?")
            elif res.status_code not in [200, 403, 206]:
raise NodeServerError("Unexpected status code: %s" % res.status_code)
if "Content-Type" in res.headers and "application/json" in res.headers['Content-Type']:
result = res.json()
if 'error' in result:
raise NodeResponseError(result['error'])
return result
else:
return res
except json.decoder.JSONDecodeError as e:
raise NodeServerError(str(e))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
raise NodeConnectionError(str(e))
def post(self, url, data=None, headers={}):
try:
res = requests.post(self.url(url), data=data, headers=headers, timeout=self.timeout)
if res.status_code == 401:
raise NodeResponseError("Unauthorized. Do you need to set a token?")
elif res.status_code != 200 and res.status_code != 403:
raise NodeServerError(res.status_code)
if "Content-Type" in res.headers and "application/json" in res.headers['Content-Type']:
result = res.json()
if 'error' in result:
raise NodeResponseError(result['error'])
return result
else:
return res
except json.decoder.JSONDecodeError as e:
raise NodeServerError(str(e))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
raise NodeConnectionError(str(e))
def info(self):
"""Retrieve information about this node.
>>> n = Node('localhost', 3000)
>>> n.info().version
'1.5.3'
>>> n.info().engine
'odm'
Returns:
:func:`~pyodm.types.NodeInfo`
"""
return NodeInfo(self.get('/info'))
def options(self):
"""Retrieve the options available for creating new tasks on this node.
>>> n = Node('localhost', 3000)
>>> n.options()[0].name
'pc-classify'
Returns:
list: [:func:`~pyodm.types.NodeOption`]
"""
return list(map(lambda o: NodeOption(**o), self.get('/options')))
def version_greater_or_equal_than(self, version):
"""Checks whether this node version is greater than or equal than
a certain version number.
>>> n = Node('localhost', 3000)
>>> n.version_greater_or_equal_than('1.3.1')
True
>>> n.version_greater_or_equal_than('10.5.1')
False
Args:
version (str): version number to compare
Returns:
bool: result of comparison.
"""
node_version = self.info().version
return self.compare_version(node_version, version) >= 0
def create_task(self, files, options={}, name=None, progress_callback=None, skip_post_processing=False, webhook=None, outputs=[], parallel_uploads=10, max_retries=5, retry_timeout=5):
"""Start processing a new task.
At a minimum you need to pass a list of image paths. All other parameters are optional.
>>> n = Node('localhost', 3000)
>>> t = n.create_task(['examples/images/image_1.jpg', 'examples/images/image_2.jpg'], \
{'orthophoto-resolution': 2, 'dsm': True})
>>> info = t.info()
>>> info.status
<TaskStatus.RUNNING: 20>
>>> info.last_error
''
>>> t.info().images_count
2
>>> t.output()[0:2]
['DJI_0131.JPG - DJI_0313.JPG has 1 candidate matches', 'DJI_0131.JPG - DJI_0177.JPG has 3 candidate matches']
Args:
files (list): list of image paths + optional GCP file path.
options (dict): options to use, for example {'orthophoto-resolution': 3, ...}
name (str): name for the task
progress_callback (function): callback reporting upload progress percentage
            skip_post_processing (bool): When true, skips generation of map tiles, derivative assets and point cloud tiles.
webhook (str): Optional URL to call when processing has ended (either successfully or unsuccessfully).
outputs (list): Optional paths relative to the project directory that should be included in the all.zip result file, overriding the default behavior.
parallel_uploads (int): Number of parallel uploads.
max_retries (int): Number of attempts to make before giving up on a file upload.
            retry_timeout (int): Wait at least this many seconds, multiplied by the retry number, before attempting to upload a file again.
Returns:
:func:`~Task`
"""
if not self.version_greater_or_equal_than("1.4.0"):
return self.create_task_fallback(files, options, name, progress_callback)
if len(files) == 0:
raise NodeResponseError("Not enough images")
fields = {
'name': name,
'options': options_to_json(options),
}
if skip_post_processing:
fields['skipPostProcessing'] = 'true'
if webhook is not None:
fields['webhook'] = webhook
if outputs:
fields['outputs'] = json.dumps(outputs)
e = MultipartEncoder(fields=fields)
result = self.post('/task/new/init', data=e, headers={'Content-Type': e.content_type})
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
if isinstance(result, dict) and 'uuid' in result:
uuid = result['uuid']
progress_event = None
class nonloc:
uploaded_files = AtomicCounter(0)
error = None
            # Equivalent to passing the open file descriptor, since requests
            # eventually calls read(); this way we make sure to close each file
            # before reading the next, so we don't run into OS open-file limits
def read_file(file_path):
with open(file_path, 'rb') as f:
return f.read()
# Upload
def worker():
while True:
task = q.get()
if task is None or nonloc.error is not None:
q.task_done()
break
# Upload file
if task['wait_until'] > datetime.datetime.now():
time.sleep((task['wait_until'] - datetime.datetime.now()).seconds)
try:
file = task['file']
fields = {
'images': [(os.path.basename(file), read_file(file), (mimetypes.guess_type(file)[0] or "image/jpg"))]
}
e = MultipartEncoder(fields=fields)
result = self.post('/task/new/upload/{}'.format(uuid), data=e, headers={'Content-Type': e.content_type})
if isinstance(result, dict) and 'success' in result and result['success']:
uf = nonloc.uploaded_files.increment()
if progress_event is not None:
progress_event.set()
else:
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
else:
raise NodeServerError("Failed upload with unexpected result: %s" % str(result))
except OdmError as e:
if task['retries'] < max_retries:
# Put task back in queue
task['retries'] += 1
task['wait_until'] = datetime.datetime.now() + datetime.timedelta(seconds=task['retries'] * retry_timeout)
q.put(task)
else:
nonloc.error = e
except Exception as e:
nonloc.error = e
finally:
q.task_done()
q = queue.Queue()
threads = []
for i in range(parallel_uploads):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
if progress_callback is not None:
progress_event = threading.Event()
now = datetime.datetime.now()
for file in files:
q.put({
'file': file,
'wait_until': now,
'retries': 0
})
# Wait for progress updates
if progress_event is not None:
current_progress = 0
while not q.empty():
if progress_event.wait(0.1):
progress_event.clear()
current_progress = 100.0 * nonloc.uploaded_files.value / len(files)
try:
progress_callback(current_progress)
except Exception as e:
nonloc.error = e
if nonloc.error is not None:
break
# Make sure to report 100% complete
if current_progress != 100 and nonloc.error is None:
try:
progress_callback(100.0)
except Exception as e:
nonloc.error = e
# block until all tasks are done
if nonloc.error is None:
q.join()
# stop workers
for i in range(parallel_uploads):
q.put(None)
for t in threads:
t.join()
if nonloc.error is not None:
raise nonloc.error
result = self.post('/task/new/commit/{}'.format(uuid))
return self.handle_task_new_response(result)
else:
raise NodeServerError("Invalid response from /task/new/init: %s" % result)
def create_task_fallback(self, files, options={}, name=None, progress_callback=None):
        # create_task implementation for nodes that predate the chunked upload API, used as a fallback
if len(files) == 0:
raise NodeResponseError("Not enough images")
        # Equivalent to passing the open file descriptor, since requests
        # eventually calls read(); this way we make sure to close each file
        # before reading the next, so we don't run into OS open-file limits
def read_file(file_path):
with open(file_path, 'rb') as f:
return f.read()
fields = {
'name': name,
'options': options_to_json(options),
'images': [(os.path.basename(f), read_file(f), (mimetypes.guess_type(f)[0] or "image/jpg")) for
f in files]
}
def create_callback(mpe):
total_bytes = mpe.len
def callback(monitor):
if progress_callback is not None and total_bytes > 0:
progress_callback(100.0 * monitor.bytes_read / total_bytes)
return callback
e = MultipartEncoder(fields=fields)
m = encoder.MultipartEncoderMonitor(e, create_callback(e))
result = self.post('/task/new', data=m, headers={'Content-Type': m.content_type})
return self.handle_task_new_response(result)
def handle_task_new_response(self, result):
if isinstance(result, dict) and 'uuid' in result:
return Task(self, result['uuid'])
elif isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
else:
raise NodeServerError('Invalid response: ' + str(result))
def get_task(self, uuid):
"""Helper method to initialize a task from an existing UUID
>>> n = Node("localhost", 3000)
>>> t = n.get_task('00000000-0000-0000-0000-000000000000')
>>> t.__class__
<class 'pyodm.api.Task'>
Args:
uuid: Unique identifier of the task
"""
return Task(self, uuid)
class Task:
"""A task is created to process images. To create a task, use :func:`~Node.create_task`.
Args:
node (:func:`~Node`): node this task belongs to
uuid (str): Unique identifier assigned to this task.
"""
def __init__(self, node, uuid):
self.node = node
self.uuid = uuid
def get(self, url, query = {}, **kwargs):
result = self.node.get(url, query, **kwargs)
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
return result
def post(self, url, data):
result = self.node.post(url, data)
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
return result
def info(self, with_output=None):
"""Retrieves information about this task.
Returns:
:func:`~pyodm.types.TaskInfo`
"""
query = {}
if with_output is not None:
query['with_output'] = with_output
return TaskInfo(self.get('/task/{}/info'.format(self.uuid), query))
def output(self, line=0):
"""Retrieve console task output.
Args:
line (int): Optional line number that the console output should be truncated from. For example, passing a value of 100 will retrieve the console output starting from line 100. Negative numbers are also allowed. For example -50 will retrieve the last 50 lines of console output. Defaults to 0 (retrieve all console output).
Returns:
[str]: console output (one list item per row).
"""
return self.get('/task/{}/output'.format(self.uuid), {'line': line})
def cancel(self):
"""Cancel this task.
Returns:
bool: task was canceled or not
"""
return self.post('/task/cancel', {'uuid': self.uuid}).get('success', False)
def remove(self):
"""Remove this task.
Returns:
bool: task was removed or not
"""
return self.post('/task/remove', {'uuid': self.uuid}).get('success', False)
def restart(self, options=None):
"""Restart this task.
Args:
options (dict): options to use, for example {'orthophoto-resolution': 3, ...}
Returns:
bool: task was restarted or not
"""
data = {'uuid': self.uuid}
if options is not None: data['options'] = options_to_json(options)
return self.post('/task/restart', data).get('success', False)
def download_zip(self, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10):
"""Download this task's assets archive to a directory.
Args:
            destination (str): directory where the assets archive will be downloaded. If the directory does not exist, it will be created.
progress_callback (function): an optional callback with one parameter, the download progress percentage.
parallel_downloads (int): maximum number of parallel downloads if the node supports http range.
parallel_chunks_size (int): size in MB of chunks for parallel downloads
Returns:
str: path to archive file (.zip)
"""
info = self.info()
if info.status != TaskStatus.COMPLETED:
raise NodeResponseError("Cannot download task, task status is " + str(info.status))
if not os.path.exists(destination):
os.makedirs(destination, exist_ok=True)
try:
download_stream = self.get('/task/{}/download/all.zip'.format(self.uuid), stream=True)
headers = download_stream.headers
zip_path = os.path.join(destination, "{}_{}_all.zip".format(self.uuid, int(time.time())))
# Keep track of download progress (if possible)
content_length = download_stream.headers.get('content-length')
total_length = int(content_length) if content_length is not None else None
downloaded = 0
chunk_size = int(parallel_chunks_size * 1024 * 1024)
use_fallback = False
accept_ranges = headers.get('accept-ranges')
# Can we do parallel downloads?
if accept_ranges is not None and accept_ranges.lower() == 'bytes' and total_length is not None and total_length > chunk_size and parallel_downloads > 1:
num_chunks = int(math.ceil(total_length / float(chunk_size)))
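                # e.g. (illustrative): a 95 MB archive with parallel_chunks_size=10 is split into
                # ceil(95 / 10) = 10 byte-range chunks, fetched by up to parallel_downloads workers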
num_workers = parallel_downloads
class nonloc:
completed_chunks = AtomicCounter(0)
merge_chunks = [False] * num_chunks
error = None
def merge():
current_chunk = 0
with open(zip_path, "wb") as out_file:
while current_chunk < num_chunks and nonloc.error is None:
if nonloc.merge_chunks[current_chunk]:
chunk_file = "%s.part%s" % (zip_path, current_chunk)
with open(chunk_file, "rb") as fd:
out_file.write(fd.read())
os.unlink(chunk_file)
current_chunk += 1
else:
time.sleep(0.1)
def worker():
while True:
task = q.get()
part_num, bytes_range = task
if bytes_range is None or nonloc.error is not None:
q.task_done()
break
try:
# Download chunk
res = self.get('/task/{}/download/all.zip'.format(self.uuid), stream=True, headers={'Range': 'bytes=%s-%s' % bytes_range})
if res.status_code == 206:
with open("%s.part%s" % (zip_path, part_num), 'wb') as fd:
for chunk in res.iter_content(4096):
fd.write(chunk)
with nonloc.completed_chunks.lock:
nonloc.completed_chunks.value += 1
if progress_callback is not None:
progress_callback(100.0 * nonloc.completed_chunks.value / num_chunks)
nonloc.merge_chunks[part_num] = True
else:
nonloc.error = RangeNotAvailableError()
except OdmError as e:
time.sleep(5)
q.put((part_num, bytes_range))
except Exception as e:
nonloc.error = e
finally:
q.task_done()
q = queue.PriorityQueue()
threads = []
for i in range(num_workers):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
merge_thread = threading.Thread(target=merge)
merge_thread.start()
range_start = 0
for i in range(num_chunks):
range_end = min(range_start + chunk_size - 1, total_length - 1)
q.put((i, (range_start, range_end)))
range_start = range_end + 1
# block until all tasks are done
q.join()
# stop workers
for i in range(len(threads)):
q.put((-1, None))
for t in threads:
t.join()
merge_thread.join()
if nonloc.error is not None:
if isinstance(nonloc.error, RangeNotAvailableError):
use_fallback = True
else:
raise nonloc.error
else:
use_fallback = True
if use_fallback:
# Single connection, boring download
with open(zip_path, 'wb') as fd:
for chunk in download_stream.iter_content(4096):
downloaded += len(chunk)
if progress_callback is not None and total_length is not None:
progress_callback((100.0 * float(downloaded) / total_length))
fd.write(chunk)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, ReadTimeoutError) as e:
raise NodeConnectionError(e)
return zip_path
def download_assets(self, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10):
"""Download this task's assets to a directory.
Args:
            destination (str): directory where the assets will be downloaded. If the directory does not exist, it will be created.
progress_callback (function): an optional callback with one parameter, the download progress percentage
parallel_downloads (int): maximum number of parallel downloads if the node supports http range.
parallel_chunks_size (int): size in MB of chunks for parallel downloads
Returns:
str: path to saved assets
"""
zip_path = self.download_zip(destination, progress_callback=progress_callback, parallel_downloads=parallel_downloads, parallel_chunks_size=parallel_chunks_size)
with zipfile.ZipFile(zip_path, "r") as zip_h:
zip_h.extractall(destination)
os.remove(zip_path)
return destination
def wait_for_completion(self, status_callback=None, interval=3, max_retries=5, retry_timeout=5):
"""Wait for the task to complete. The call will block until the task status has become
:func:`~TaskStatus.COMPLETED`. If the status is set to :func:`~TaskStatus.CANCELED` or :func:`~TaskStatus.FAILED`
it raises a TaskFailedError exception.
Args:
status_callback (function): optional callback that will be called with task info updates every interval seconds.
interval (int): seconds between status checks.
max_retries (int): number of repeated attempts that should be made to receive a status update before giving up.
            retry_timeout (int): wait N*retry_timeout seconds between attempts, where N is the attempt number.
"""
retry = 0
while True:
try:
info = self.info()
except NodeConnectionError as e:
if retry < max_retries:
retry += 1
time.sleep(retry * retry_timeout)
continue
else:
raise e
retry = 0
if status_callback is not None:
status_callback(info)
if info.status in [TaskStatus.COMPLETED, TaskStatus.CANCELED, TaskStatus.FAILED]:
break
time.sleep(interval)
if info.status in [TaskStatus.FAILED, TaskStatus.CANCELED]:
raise TaskFailedError(info.status)
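# Minimal usage sketch (illustrative only): it assumes a NodeODM instance listening on
# localhost:3000 and that the example image paths exist; adjust both for your setup.
if __name__ == "__main__":
    node = Node("localhost", 3000)
    print("Node version:", node.info().version)
    task = node.create_task(["images/image_1.jpg", "images/image_2.jpg"],
                            {"orthophoto-resolution": 2, "dsm": True})
    task.wait_for_completion(status_callback=lambda info: print("status:", info.status))
    print("Assets saved to:", task.download_assets("results"))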
|
main.py
|
import threading
NUM_THREAD = 10
printed = False
print_lock = threading.Lock()
def print_text():
    # Only the first thread to acquire the lock prints; the `printed` flag
    # ensures the message appears exactly once even with many threads.
    global printed
    with print_lock:
        if not printed:
            print("printed once")
            printed = True
threads = []
for i in range(NUM_THREAD):
    t = threading.Thread(target=print_text)
    threads.append(t)
    t.start()
for i in range(NUM_THREAD):
    threads[i].join()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import signal
import sys
import threading
import warnings
import importlib
from threading import RLock
from tempfile import NamedTemporaryFile
from types import TracebackType
from typing import (
Any,
Callable,
cast,
ClassVar,
Dict,
Iterable,
List,
NoReturn,
Optional,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
)
from py4j.java_collections import JavaMap
from py4j.protocol import Py4JError
from pyspark import accumulators, since
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import (
CPickleSerializer,
BatchedSerializer,
Serializer,
UTF8Deserializer,
PairDeserializer,
AutoBatchedSerializer,
NoOpSerializer,
ChunkedStream,
)
from pyspark.storagelevel import StorageLevel
from pyspark.resource.information import ResourceInformation
from pyspark.rdd import RDD, _load_from_socket # type: ignore[attr-defined]
from pyspark.taskcontext import TaskContext
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler, UDFBasicProfiler
from py4j.java_gateway import is_instance_of, JavaGateway, JavaObject, JVMView
if TYPE_CHECKING:
from pyspark.accumulators import AccumulatorParam
__all__ = ["SparkContext"]
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS: Dict[str, Any] = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
T = TypeVar("T")
U = TypeVar("U")
class SparkContext:
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create :class:`RDD` and
broadcast variables on that cluster.
When you create a new SparkContext, at least the master and app name should
be set, either through the named parameters here or through `conf`.
Parameters
----------
master : str, optional
Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
appName : str, optional
A name for your job, to display on the cluster web UI.
sparkHome : str, optional
Location where Spark is installed on cluster nodes.
pyFiles : list, optional
Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
environment : dict, optional
A dictionary of environment variables to set on
worker nodes.
batchSize : int, optional
The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
serializer : :class:`pyspark.serializers.Serializer`, optional
The serializer for RDDs.
conf : :py:class:`pyspark.SparkConf`, optional
An object setting Spark properties.
gateway : :py:class:`py4j.java_gateway.JavaGateway`, optional
Use an existing gateway and JVM, otherwise a new JVM
will be instantiated. This is only used internally.
jsc : :py:class:`py4j.java_gateway.JavaObject`, optional
The JavaSparkContext instance. This is only used internally.
profiler_cls : type, optional
A class of custom Profiler used to do profiling
(default is :class:`pyspark.profiler.BasicProfiler`).
udf_profiler_cls : type, optional
A class of custom Profiler used to do udf profiling
(default is :class:`pyspark.profiler.UDFBasicProfiler`).
Notes
-----
Only one :class:`SparkContext` should be active per JVM. You must `stop()`
the active :class:`SparkContext` before creating a new one.
    A :class:`SparkContext` instance does not support sharing across multiple
    processes out of the box, and PySpark does not guarantee multi-processing execution.
    Use threads instead for concurrent processing purposes.
Examples
--------
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: ...
"""
_gateway: ClassVar[Optional[JavaGateway]] = None
_jvm: ClassVar[Optional[JVMView]] = None
_next_accum_id = 0
_active_spark_context: ClassVar[Optional["SparkContext"]] = None
_lock = RLock()
_python_includes: Optional[
List[str]
] = None # zip and egg files that need to be added to PYTHONPATH
serializer: Serializer
profiler_collector: ProfilerCollector
PACKAGE_EXTENSIONS: Iterable[str] = (".zip", ".egg", ".jar")
def __init__(
self,
master: Optional[str] = None,
appName: Optional[str] = None,
sparkHome: Optional[str] = None,
pyFiles: Optional[List[str]] = None,
environment: Optional[Dict[str, Any]] = None,
batchSize: int = 0,
serializer: "Serializer" = CPickleSerializer(),
conf: Optional[SparkConf] = None,
gateway: Optional[JavaGateway] = None,
jsc: Optional[JavaObject] = None,
profiler_cls: Type[BasicProfiler] = BasicProfiler,
udf_profiler_cls: Type[UDFBasicProfiler] = UDFBasicProfiler,
):
if (
conf is None
or cast(str, conf.get("spark.executor.allowSparkContext", "false")).lower() != "true"
):
# In order to prevent SparkContext from being created in executors.
SparkContext._assert_on_driver()
self._callsite = first_spark_call() or CallSite(None, None, None)
if gateway is not None and gateway.gateway_parameters.auth_token is None:
raise ValueError(
"You are trying to pass an insecure Py4j gateway to Spark. This"
" is not allowed as it is a security risk."
)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(
master,
appName,
sparkHome,
pyFiles,
environment,
batchSize,
serializer,
conf,
jsc,
profiler_cls,
udf_profiler_cls,
)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(
self,
master: Optional[str],
appName: Optional[str],
sparkHome: Optional[str],
pyFiles: Optional[List[str]],
environment: Optional[Dict[str, Any]],
batchSize: int,
serializer: Serializer,
conf: Optional[SparkConf],
jsc: JavaObject,
profiler_cls: Type[BasicProfiler] = BasicProfiler,
udf_profiler_cls: Type[UDFBasicProfiler] = UDFBasicProfiler,
) -> None:
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise RuntimeError("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise RuntimeError("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv.") :]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
assert self._gateway is not None
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token) # type: ignore[attr-defined]
(host, port) = self._accumulatorServer.server_address
assert self._jvm is not None
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
# If encryption is enabled, we need to setup a server in the jvm to read broadcast
# data via a socket.
# scala's mangled names w/ $ in them require special treatment.
self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = str(
self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc)
)
os.environ["SPARK_BUFFER_SIZE"] = str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))
self.pythonExec = os.environ.get("PYSPARK_PYTHON", "python3")
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in pyFiles or []:
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in cast(str, self._conf.get("spark.submit.pyFiles", "")).split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] specified in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning,
)
# Create a temporary directory inside spark.local.dir:
assert self._jvm is not None
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = self._jvm.org.apache.spark.util.Utils.createTempDir(
local_dir, "pyspark"
).getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, udf_profiler_cls, dump_path)
else:
self.profiler_collector = None # type: ignore[assignment]
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal: Any, frame: Any) -> NoReturn:
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self) -> str:
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self) -> str:
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf: JavaObject) -> JavaObject:
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
assert self._jvm is not None
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(
cls,
instance: Optional["SparkContext"] = None,
gateway: Optional[JavaGateway] = None,
conf: Optional[SparkConf] = None,
) -> None:
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (
SparkContext._active_spark_context
and SparkContext._active_spark_context != instance
):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (
currentAppName,
currentMaster,
callsite.function,
callsite.file,
callsite.linenum,
)
)
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self) -> NoReturn:
# This method is called when attempting to pickle SparkContext, which is always an error:
raise RuntimeError(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self) -> "SparkContext":
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(
self,
type: Optional[Type[BaseException]],
value: Optional[BaseException],
trace: Optional[TracebackType],
) -> None:
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf: Optional[SparkConf] = None) -> "SparkContext":
"""
Get or instantiate a SparkContext and register it as a singleton object.
Parameters
----------
conf : :py:class:`pyspark.SparkConf`, optional
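        Examples
        --------
        >>> SparkContext.getOrCreate() is sc
        True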
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
assert SparkContext._active_spark_context is not None
return SparkContext._active_spark_context
def setLogLevel(self, logLevel: str) -> None:
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
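        Examples
        --------
        >>> sc.setLogLevel("WARN")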
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key: str, value: str) -> None:
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
assert SparkContext._jvm is not None
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self) -> str:
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
def applicationId(self) -> str:
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
Examples
--------
>>> sc.applicationId # doctest: +ELLIPSIS
'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self) -> str:
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self) -> int:
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self) -> int:
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self) -> int:
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self) -> None:
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
"Unable to cleanly shutdown Spark JVM process."
" It is possible that the process has crashed,"
" been killed or may also be in a zombie state.",
RuntimeWarning,
)
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None # type: ignore[assignment]
def emptyRDD(self) -> "RDD[Any]":
"""
Create an RDD that has no partitions or elements.
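        Examples
        --------
        >>> sc.emptyRDD().count()
        0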
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(
self, start: int, end: Optional[int] = None, step: int = 1, numSlices: Optional[int] = None
) -> "RDD[int]":
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numSlices : int, optional
the number of partitions of the new RDD
Returns
-------
:py:class:`pyspark.RDD`
An RDD of int
Examples
--------
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(range(start, end, step), numSlices)
def parallelize(self, c: Iterable[T], numSlices: Optional[int] = None) -> "RDD[T]":
"""
Distribute a local Python collection to form an RDD. Using range
is recommended if the input represents a range for performance.
Examples
--------
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(range(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, range):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1 # type: ignore[index]
start0 = c[0] # type: ignore[index]
def getStart(split: int) -> int:
assert numSlices is not None
return start0 + int((split * size / numSlices)) * step
def f(split: int, iterator: Iterable[T]) -> Iterable:
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return range(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024)) # type: ignore[arg-type]
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename: str) -> JavaObject:
assert self._jvm is not None
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer() -> JavaObject:
assert self._jvm is not None
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(
self,
data: Iterable[T],
serializer: Serializer,
reader_func: Callable,
createRDDServer: Callable,
) -> JavaObject:
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
        Parameters
        ----------
data
object to be serialized
serializer : :py:class:`pyspark.serializers.Serializer`
reader_func : function
A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. Only used when encryption is disabled.
createRDDServer : function
A function which creates a PythonRDDServer in the jvm to
accept the serialized data, for use when encryption is enabled.
"""
if self._encryption_enabled:
# with encryption, we open a server in java and send the data directly
server = createRDDServer()
(sock_file, _) = local_connect_and_auth(server.port(), server.secret())
chunked_out = ChunkedStream(sock_file, 8192)
serializer.dump_stream(data, chunked_out)
chunked_out.close()
# this call will block until the server has read all the data and processed it (or
# throws an exception)
r = server.getResult()
return r
else:
# without encryption, we serialize to a file, and we read the file in java and
# parallelize from there.
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
try:
serializer.dump_stream(data, tempFile)
finally:
tempFile.close()
return reader_func(tempFile.name)
finally:
                # we eagerly read the file so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name: str, minPartitions: Optional[int] = None) -> "RDD[Any]":
"""
Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.
Examples
--------
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
def textFile(
self, name: str, minPartitions: Optional[int] = None, use_unicode: bool = True
) -> "RDD[str]":
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
Examples
--------
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self, UTF8Deserializer(use_unicode))
def wholeTextFiles(
self, path: str, minPartitions: Optional[int] = None, use_unicode: bool = True
) -> "RDD[Tuple[str, str]]":
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
The text files must be encoded as UTF-8.
If `use_unicode` is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files:
.. code-block:: text
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
then ``rdd`` contains:
.. code-block:: text
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
Notes
-----
Small files are preferred, as each file will be loaded fully in memory.
Examples
--------
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[('.../1.txt', '1'), ('.../2.txt', '2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(
self._jsc.wholeTextFiles(path, minPartitions),
self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)),
)
def binaryFiles(
self, path: str, minPartitions: Optional[int] = None
) -> "RDD[Tuple[str, bytes]]":
"""
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
Notes
-----
        Small files are preferred; large files are also allowable, but may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(
self._jsc.binaryFiles(path, minPartitions),
self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()),
)
def binaryRecords(self, path: str, recordLength: int) -> "RDD[bytes]":
"""
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
Parameters
----------
path : str
Directory to the input data files
recordLength : int
The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d: Optional[Dict[str, str]]) -> JavaMap:
assert self._jvm is not None
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(
self,
path: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
minSplits: Optional[int] = None,
batchSize: int = 0,
) -> "RDD[Tuple[T, U]]":
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pickle pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. :class:`CPickleSerializer` is used to deserialize pickled objects on the Python side
Parameters
----------
path : str
path to sequencefile
keyClass: str, optional
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
valueConverter : str, optional
            fully qualified name of a function returning value WritableConverter
minSplits : int, optional
minimum splits in dataset (default min(2, sc.defaultParallelism))
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
assert self._jvm is not None
jrdd = self._jvm.PythonRDD.sequenceFile(
self._jsc,
path,
keyClass,
valueClass,
keyConverter,
valueConverter,
minSplits,
batchSize,
)
return RDD(jrdd, self)
def newAPIHadoopFile(
self,
path: str,
inputFormatClass: str,
keyClass: str,
valueClass: str,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
batchSize: int = 0,
) -> "RDD[Tuple[T, U]]":
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
Parameters
----------
path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
None by default
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
None by default
conf : dict, optional
Hadoop configuration, passed in as a dict
None by default
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
assert self._jvm is not None
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(
self._jsc,
path,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def newAPIHadoopRDD(
self,
inputFormatClass: str,
keyClass: str,
valueClass: str,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
batchSize: int = 0,
) -> "RDD[Tuple[T, U]]":
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
assert self._jvm is not None
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(
self._jsc,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def hadoopFile(
self,
path: str,
inputFormatClass: str,
keyClass: str,
valueClass: str,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
batchSize: int = 0,
) -> "RDD[Tuple[T, U]]":
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
        Parameters
        ----------
        path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
assert self._jvm is not None
jrdd = self._jvm.PythonRDD.hadoopFile(
self._jsc,
path,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def hadoopRDD(
self,
inputFormatClass: str,
keyClass: str,
valueClass: str,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
batchSize: int = 0,
) -> "RDD[Tuple[T, U]]":
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
assert self._jvm is not None
jrdd = self._jvm.PythonRDD.hadoopRDD(
self._jsc,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def _checkpointFile(self, name: str, input_deserializer: PairDeserializer) -> RDD:
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
def union(self, rdds: List["RDD[T]"]) -> "RDD[T]":
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
Examples
--------
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
['Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer # type: ignore[attr-defined]
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds): # type: ignore[attr-defined]
rdds = [x._reserialize() for x in rdds] # type: ignore[attr-defined]
gw = SparkContext._gateway
assert gw is not None
jvm = SparkContext._jvm
assert jvm is not None
jrdd_cls = jvm.org.apache.spark.api.java.JavaRDD
jpair_rdd_cls = jvm.org.apache.spark.api.java.JavaPairRDD
jdouble_rdd_cls = jvm.org.apache.spark.api.java.JavaDoubleRDD
if is_instance_of(gw, rdds[0]._jrdd, jrdd_cls): # type: ignore[attr-defined]
cls = jrdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jpair_rdd_cls): # type: ignore[attr-defined]
cls = jpair_rdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jdouble_rdd_cls): # type: ignore[attr-defined]
cls = jdouble_rdd_cls
else:
cls_name = rdds[0]._jrdd.getClass().getCanonicalName() # type: ignore[attr-defined]
raise TypeError("Unsupported Java RDD class %s" % cls_name)
jrdds = gw.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd # type: ignore[attr-defined]
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer) # type: ignore[attr-defined]
def broadcast(self, value: T) -> "Broadcast[T]":
"""
Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
object for reading it in distributed functions. The variable will
be sent to each executor only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
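# Illustrative usage sketch (not part of the original file): executors read a
# broadcast variable through its `.value` attribute.
#
#   lookup = sc.broadcast({"a": 1, "b": 2})
#   sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
#   # -> [1, 2, 1]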
def accumulator(
self, value: T, accum_param: Optional["AccumulatorParam[T]"] = None
) -> "Accumulator[T]":
"""
Create an :class:`Accumulator` with the given initial value, using a given
:class:`AccumulatorParam` helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integer,
floating-point, and complex values if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM # type: ignore[attr-defined]
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM # type: ignore[attr-defined]
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM # type: ignore[attr-defined]
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
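# Illustrative usage sketch (not part of the original file): with no
# AccumulatorParam given, the integer default from `accumulators` is used;
# tasks may only add to the accumulator, while the driver reads `.value`.
#
#   counter = sc.accumulator(0)
#   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: counter.add(x))
#   counter.value  # -> 10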
def addFile(self, path: str, recursive: bool = False) -> None:
"""
Add a file to be downloaded with this Spark job on every node.
The `path` passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
Notes
-----
A path can be added only once. Subsequent additions of the same path are ignored.
Examples
--------
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path: str) -> None:
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The `path` passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
Notes
-----
A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
assert self._python_includes is not None
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
importlib.invalidate_caches()
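# Illustrative usage sketch (not part of the original file): "deps.zip" and
# "mymodule" are placeholder names for a zipped package shipped to executors.
#
#   sc.addPyFile("/path/to/deps.zip")
#   from mymodule import transform                     # importable on the driver ...
#   sc.parallelize([1, 2]).map(transform).collect()    # ... and inside tasks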
def setCheckpointDir(self, dirName: str) -> None:
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
@since(3.1)
def getCheckpointDir(self) -> Optional[str]:
"""
Return the directory where RDDs are checkpointed. Returns None if no
checkpoint directory has been set.
"""
if not self._jsc.sc().getCheckpointDir().isEmpty():
return self._jsc.sc().getCheckpointDir().get()
return None
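# Illustrative usage sketch (not part of the original file): a local path is
# used here for brevity; on a cluster this must be an HDFS path. Note that
# `getCheckpointDir` typically returns a generated subdirectory of the path set.
#
#   sc.setCheckpointDir("/tmp/spark-checkpoints")
#   rdd = sc.parallelize(range(10))
#   rdd.checkpoint()
#   rdd.count()              # materializes the checkpoint
#   sc.getCheckpointDir()    # e.g. '/tmp/spark-checkpoints/<uuid>'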
def _getJavaStorageLevel(self, storageLevel: StorageLevel) -> JavaObject:
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("storageLevel must be of type pyspark.StorageLevel")
assert self._jvm is not None
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(
storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication,
)
def setJobGroup(self, groupId: str, description: str, interruptOnCancel: bool = False) -> None:
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
running jobs in this group.
Notes
-----
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
Examples
--------
>>> import threading
>>> from time import sleep
>>> from pyspark import InheritableThread
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise RuntimeError("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = InheritableThread(target=start_job, args=(10,)).start()
>>> suppress = InheritableThread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key: str, value: str) -> None:
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
Notes
-----
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key: str) -> Optional[str]:
"""
Get a local property set in this thread, or None if it is missing. See
:meth:`setLocalProperty`.
"""
return self._jsc.getLocalProperty(key)
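# Illustrative usage sketch (not part of the original file): "pool1" is a
# placeholder fair-scheduler pool name.
#
#   sc.setLocalProperty("spark.scheduler.pool", "pool1")
#   sc.getLocalProperty("spark.scheduler.pool")   # -> 'pool1'
#   sc.getLocalProperty("no.such.key")            # -> None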
def setJobDescription(self, value: str) -> None:
"""
Set a human readable description of the current job.
Notes
-----
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
"""
self._jsc.setJobDescription(value)
def sparkUser(self) -> str:
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId: str) -> None:
"""
Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self) -> None:
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self) -> StatusTracker:
"""
Return a :class:`StatusTracker` object.
"""
return StatusTracker(self._jsc.statusTracker())
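# Illustrative usage sketch (not part of the original file): poll job and stage
# state through the tracker instead of scraping the web UI.
#
#   tracker = sc.statusTracker()
#   tracker.getActiveJobsIds()    # e.g. [] when nothing is running
#   tracker.getActiveStageIds()   # e.g. []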
def runJob(
self,
rdd: "RDD[T]",
partitionFunc: Callable[[Iterable[T]], Iterable[U]],
partitions: Optional[List[int]] = None,
allowLocal: bool = False,
) -> List[U]:
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
Examples
--------
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = list(range(rdd._jrdd.partitions().size())) # type: ignore[attr-defined]
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
assert self._jvm is not None
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions) # type: ignore[attr-defined]
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer)) # type: ignore[attr-defined]
def show_profiles(self) -> None:
"""Print the profile stats to stdout"""
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError(
"'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile."
)
def dump_profiles(self, path: str) -> None:
"""Dump the profile stats into directory `path`"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError(
"'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile."
)
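# Illustrative usage sketch (not part of the original file): profiling must be
# enabled via the configuration key named in the error messages above before
# the SparkContext is created.
#
#   conf = SparkConf().set("spark.python.profile", "true")
#   sc = SparkContext(conf=conf)
#   sc.parallelize(range(100)).map(lambda x: x * x).count()
#   sc.show_profiles()             # prints per-RDD profile stats
#   sc.dump_profiles("/tmp/prof")  # or write them to a directory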
def getConf(self) -> SparkConf:
"""Return a copy of this SparkContext's configuration as a :class:`SparkConf`."""
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
@property
def resources(self) -> Dict[str, ResourceInformation]:
"""Return the resources discovered for this SparkContext, keyed by resource name."""
resources = {}
jresources = self._jsc.resources()
for x in jresources:
name = jresources[x].name()
jaddresses = jresources[x].addresses()
addrs = [addr for addr in jaddresses]
resources[name] = ResourceInformation(name, addrs)
return resources
@staticmethod
def _assert_on_driver() -> None:
"""
Called to ensure that SparkContext is created only on the Driver.
Throws an exception if a SparkContext is about to be created in executors.
"""
if TaskContext.get() is not None:
raise RuntimeError("SparkContext should only be created and accessed on the driver.")
def _test() -> None:
import atexit
import doctest
import tempfile
globs = globals().copy()
globs["sc"] = SparkContext("local[4]", "PythonTest")
globs["tempdir"] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs["tempdir"]))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
rpc_test.py
|
import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
return torch.add(torch.ones(1), torch.ones(1))
def udf_with_torch_ops(device=-1, use_record_function=False):
device_ctx = contextlib.suppress() if device == -1 else torch.cuda.device(device)
record_function_ctx = (
torch.autograd.profiler.record_function("##forward##")
if use_record_function
else contextlib.suppress()
)
with device_ctx, record_function_ctx:
t1, t2 = torch.ones(1), torch.ones(1)
t = torch.add(t1, t2)
t = torch.mul(t, t)
t = t.relu()
t = t.sigmoid()
# Events (operator invocations) that are expected to be run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
"aten::ones",
"aten::ones",
"aten::add",
"aten::mul",
"aten::relu",
"aten::clamp_min",
"aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
FIFTY_MIL_CYCLES = 50000000
_rpc_barrier_count = 0
def _increment_count():
global _rpc_barrier_count
_rpc_barrier_count += 1
def _reset_count():
global _rpc_barrier_count
_rpc_barrier_count = 0
class StubRpcAgent:
def __init__(self, world_size):
self.world_size = world_size
def get_worker_infos(self):
return {
rpc.WorkerInfo(name=worker_name(rank), id=rank)
for rank in range(self.world_size)
}
def _stub_construct_rpc_backend_options_handler(**kwargs):
return mock.Mock() # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
return StubRpcAgent(world_size=world_size)
def set_value(value):
VALUE_FUTURE.set_result(value)
def wait_for_value_future():
return VALUE_FUTURE.result()
def set_and_check_done(value):
VALUE_FUTURE.set_result(value)
return DONE_FUTURE.result()
# TensorClass below is used to test Python user-defined functions over RPC;
# the classes and functions that follow are used to test Python user-defined
# classes and methods over RPC.
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
def __init__(self):
self.t = None
def __getstate__(self):
(pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
)
return (pickled_python_udf, tensors)
def __setstate__(self, obj):
python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
result = python_udf.func(python_udf.args[0], python_udf.args[1])
self.t = result
def set(self, val):
self.t = val
class SlowPickleClass:
def __init__(self, t):
self.t = t
def __getstate__(self):
time.sleep(self.t)
return (self.t, )
def __setstate__(self, obj):
self.t = obj[0]
time.sleep(self.t)
class MyClass:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor():
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
return torch.sparse_coo_tensor(i, v, (2, 3))
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception('t_view is contiguous!')
if not t_cont.is_contiguous():
raise Exception('t_cont is not contiguous!')
if not torch.equal(t_view, t_cont):
raise Exception('t_view is not equal to t_cont!')
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for k, v in dict_input.items():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
def raise_func():
raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
raise ValueError(expected_err)
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
raise ValueError(expected_err_escape)
global_rref = None
def set_global_rref(rref):
global global_rref
global_rref = rref
def clear_global_rref():
global global_rref
global_rref = None
def check_rref_confirmed(rref):
return rref.confirmed_by_owner()
def get_rref_debug_info():
return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
out = concurrent.futures.Future()
def callback(fut):
out.set_result(fut.wait() + z)
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(callback)
return out.result()
def get_events_from_profile(profile_rref):
return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
out = torch.futures.Future()
fut = rpc.rpc_async(to, torch.add, args=(x, y))
fut.then(lambda fut : out.set_result(fut.wait() + z))
return out.wait()
def add_use_future_nested_cb(to, x, y, z):
out = torch.futures.Future()
def callback(fut1):
fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
fut2.then(lambda fut2 : out.set_result(fut2.wait()))
fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
fut1.then(callback)
return out.wait()
def fail_on_fut(fut):
pass
@rpc.functions.async_execution
def async_raise_func():
raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
time.sleep(1)
x = x.to(device)
y = y.to(device)
return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut1: fut.set_result(fut1.wait() + z)
)
return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
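# Illustrative usage sketch (not part of the original tests): an
# @rpc.functions.async_execution function returns a Future on the callee, but
# the caller still receives the resolved value. Worker names are placeholders.
#
#   ret = rpc.rpc_sync("worker1", async_add_chained,
#                      args=("worker2", torch.ones(2), torch.ones(2), 1))
#   # ret == torch.ones(2) * 3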
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
fut = rpc.rpc_async(to, torch.add, args=(x, 0))
for _ in range(num):
fut = fut.then(lambda fut: fut.wait() + step)
return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
lambda fut: fut.wait() + z
)
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
futs = []
for i in range(num):
if i == 0:
futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
else:
futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
# TODO: use torch.futures.collect_all
lock = Lock()
state = {"cnt": 0, "ret": torch.zeros_like(x)}
ret_future = torch.futures.Future()
def inc_and_set(fut):
with lock:
state["cnt"] += 1
state["ret"] += fut.wait()
if state["cnt"] >= len(futs):
ret_future.set_result(state["ret"])
for fut in futs:
fut.then(inc_and_set)
return ret_future
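# Sketch of the collect_all-based variant referenced in the TODO above
# (illustrative only, not part of the original tests): collect_all returns a
# Future over the list of futures, so the manual lock/state bookkeeping goes away.
#
#   return torch.futures.collect_all(futs).then(
#       lambda all_futs: sum(f.wait() for f in all_futs.wait())
#   )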
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
device = t.device
original_stream = torch.cuda.current_stream(device)
new_stream = torch.cuda.Stream(device)
new_stream.wait_stream(original_stream)
with torch.cuda.stream(new_stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
t.fill_(1)
fut = Future(devices=[device])
fut.set_result(t)
return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
def cb(fut):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
return fut.value() + z
return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
__slots__ = ("tensor", "lock", "event")
def __init__(self, t):
self.tensor = t
# Add one non-picklable field, to ensure it's ignored/skipped.
self.lock = Lock()
self.event = torch.cuda.Event(enable_timing=True)
def increase(self, v):
with self.lock:
self.tensor += v
def sum(self):
with self.lock:
self.event.record()
return self.tensor.sum()
# Copied from test/test_cuda.py.
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
class AsyncExecutionClass:
@staticmethod
@rpc.functions.async_execution
def static_async_add(to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
@classmethod
@rpc.functions.async_execution
def class_async_add(cls, to, x, y, z):
ret_fut = torch.futures.Future()
rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: ret_fut.set_result(fut.wait() + z)
)
return ret_fut
@rpc.functions.async_execution
def bound_async_add(self, to, x, y, z):
return rpc.rpc_async(to, torch.add, args=(x, y)).then(
lambda fut: fut.wait() + z
)
def return_future():
return torch.futures.Future()
class FooBackendOptions(rpc.RpcBackendOptions):
def __init__(self, init_method):
# Must call the __init__ of the superclass (and do so directly,
# without using super()) because... pybind.
rpc.RpcBackendOptions.__init__(self)
self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name_2(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
rpc.init_rpc(
name=worker_name(self.rank % (self.world_size - 1)),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_reinit(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
# TODO: with TCP init, rank 0 raises Address already in use because
# rank 0 is the start daemon and the store is created before checking if
# RPC is already initialized in init_rpc.
if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
expected_reinit_err = "Address already in use"
else:
expected_reinit_err = "is already initialized"
with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@staticmethod
def return_callee_id():
return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
worker_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_nonzero(self):
n = self.rank + 1
dst_rank = n % self.world_size
x = torch.ones(self.world_size, self.world_size)
x[self.rank][self.rank] = 0
ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_future_wait_twice(self):
dst = worker_name((self.rank + 1) % self.world_size)
futs = []
for i in range(20):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
torch.futures.wait_all(futs)
for fut in futs:
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def _run_uneven_workload(self, num_repeat=30):
# worker0 drives and waits for worker1 and worker2
# throughout the test.
if self.rank == 0:
self.assertTrue(self.world_size >= 3)
# Phase 1: Only worker1 has workload.
dst = "worker1"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for fut in torch.futures.collect_all(futs).wait():
self.assertEqual(fut.wait(), 0)
# Phase 2: Only worker2 has workload.
# If join is not correctly implemented,
# worker2 should be closed by now.
dst = "worker2"
futs = []
for _ in range(num_repeat):
fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
def test_wait_all_workers(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
def test_wait_all_workers_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
self._run_uneven_workload()
# worker0 calls this at the end after waiting for RPC responses.
# worker1/2 call this immediately and still have some work after it.
# worker3 calls this immediately and has no more work.
rpc.api._wait_all_workers()
rpc.api._wait_all_workers()
# Wait before proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This test validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
@dist_init
def test_graceful_shutdown_with_uneven_workload(self):
"""Test graceful termination."""
self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
@dist_init
def test_profiler_rpc_record_shapes(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
t1, t2 = torch.ones(100), torch.ones(100)
with _profile(record_shapes=True) as prof:
rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
function_events = prof.function_events
remote_events = [event for event in function_events if event.is_remote]
remote_add_event = [
event for event in remote_events if "aten::add" in event.name
][0]
remote_add_input_shapes = remote_add_event.input_shapes
# Run profiler on equivalent local op and validate shapes are the same.
with _profile(record_shapes=True) as prof:
torch.add(t1, t2)
local_function_events = prof.function_events
local_add_event = [
event for event in local_function_events if "aten::add" in event.name
][0]
local_add_input_shapes = local_add_event.input_shapes
self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
@dist_init
def test_profiler_rpc_key_names(self):
# tests that remote events are properly prefixed with the RPC profiling key.
if self.rank != 1:
return
# Spawn multiple threads that send RPCs to ensure keys are correctly
# prefixed when there are multiple RPCs being created/in flight at the
# same time.
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
def rpc_with_profiling(dst_worker):
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
fut.wait()
events = prof.function_events
remote_event_names = {
event.name: event for event in events if event.is_remote
}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
dst_worker,
)
remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
for name, event in remote_event_names.items():
# Ensure that we have the expected key as part of the remote
# event.
self.assertTrue(name.startswith(rpc_profiling_key))
self.assertTrue(event.is_remote)
self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
# Ensure that the remote event name also contains the operator.
operator_name_substr = name[len(rpc_profiling_key) :]
# Note: we don't assert that every remote event needs to be
# in the above set, the set is just a representative set of
# what we expect to see. The profiler can change and add more
# events, but we should always expect to see this representative
# set.
matching_event = {
remote_event_name
for remote_event_name in remote_event_name_set
if remote_event_name in operator_name_substr
}
remote_event_name_set -= matching_event
# The set should be empty, otherwise its contained elements did
# not show up in the remote profiler output.
self.assertTrue(
remote_event_name_set == set(),
f"Expected {remote_event_name_set} to be included in remote profiler output.",
)
for dst in dst_ranks:
dst_worker = worker_name(dst)
num_parallel_rpcs = 2
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_parallel_rpcs
) as executor:
futs = [
executor.submit(rpc_with_profiling, dst_worker)
for _ in range(num_parallel_rpcs)
]
# Wait for workers to finish test
for fut in futs:
fut.result()
def _run_test_profiler_remote_events_profiled(self):
# Tests that we can successfully invoke the profiler on a remote node,
# and collect the remote events back in the local profiler.
if self.rank != 1:
return
dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
for dst in dst_ranks:
dst_worker = worker_name(dst)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
ret = fut.wait()
events = prof.function_events
rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
self.check_profiling_info(
worker_name(self.rank),
dst_worker,
udf_with_torch_ops,
rpc_event,
RPCExecMode.ASYNC,
)
remote_events = {event.name: event for event in events if event.is_remote}
rpc_profiling_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
udf_with_torch_ops.__qualname__,
worker_name(self.rank),
worker_name(dst),
)
for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
self.assertTrue(expected_key in remote_events)
remote_event = remote_events[expected_key]
# Remote event should have a node ID corresponding to the worker
# it ran on.
self.assertEqual(remote_event.node_id, dst)
# Validate order remote events show up in profiling output.
def convert_remote_to_local(event_name):
remote_op_key = rpc_profiling_key + REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key)
+ len(remote_op_key) :
]
remote_events_list = [
convert_remote_to_local(event.name)
for event in events
if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
]
self.assertEqual(
set(remote_events_list),
set(EXPECTED_REMOTE_EVENTS),
f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
)
@dist_init
def test_profiler_remote_events_profiled(self):
self._run_test_profiler_remote_events_profiled()
@dist_init
def test_profiler_remote_events_profiled_single_threaded(self):
self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
with _profile() as prof:
ret = rpc.rpc_async(
dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
)
out = ret.wait()
function_events = prof.function_events
# slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
# recorded.
key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
)
nested_rpc_key_prefix = _build_rpc_profiling_key(
RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
)
expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
remote_events = [event for event in function_events if event.is_remote]
rpc_remote_event = [
event for event in remote_events if event.name == expected_key
]
self.assertEqual(1, len(rpc_remote_event))
rpc_remote_event = rpc_remote_event[0]
self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
# slow_async_add's RPC does an add on dst2, which should be reflected as well.
remote_add_key = (
expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
)
remote_add_event = [
event for event in remote_events if event.name == remote_add_key
]
self.assertEqual(1, len(remote_add_event))
remote_add_event = remote_add_event[0]
# Validate that node_id is dst2.
self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
@dist_init
def test_rpc_profiling_async_function(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_async_function_single_threaded(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
self._run_rpc_profiling_async_function()
if torch.cuda.is_available():
dist.barrier()
self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
# Copy so that extending below does not mutate the profiler's own
# cpu_children list or grow the list being iterated.
cpu_children = list(event.cpu_children)
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
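"""
Asserts that the profile captured around run_profiling_workload contains
a remote aten::mul event attributed to dst with the expected RPC
profiling metadata.
"""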
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
def _run_test_profiler_with_autograd_context(self):
dst = (self.rank + 1) % self.world_size
if self.rank == 1:
# Cases where we can double wrap messages with profiling information and autograd info.
with dist_autograd.context() as context_id:
with _profile() as prof:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
# Ensure that flipped order of ctx managers results in events being
# recorded as expected.
with _profile() as prof:
with dist_autograd.context() as context_id:
self.run_profiling_workload(dst)
self.validate_profiling_workload(dst, prof)
@dist_init
def test_profiler_with_autograd_context_single_threaded(self):
self._run_test_profiler_with_autograd_context()
@dist_init
def test_profiler_with_autograd_context(self):
self._run_test_profiler_with_autograd_context()
def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
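"""
Runs func over RPC in the given execution mode (sync, async, or remote)
while profiling on rank 1, then verifies the node id of the RPC event,
the presence of remote events from dst, and, when use_record_function is
set, that the RPC event is nested inside the "foo" record_function scope.
"""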
dst = dst if dst is not None else (self.rank + 1) % self.world_size
# only run profiler on rank 1.
if self.rank == 1:
with _profile() as prof:
record_function_ctx_mgr = (
contextlib.suppress()
if not use_record_function
else torch.autograd.profiler.record_function(
"foo"
)
)
with record_function_ctx_mgr as rf:
if rpc_exec_mode == RPCExecMode.SYNC:
rpc.rpc_sync(worker_name(dst), func, args=args)
elif rpc_exec_mode == RPCExecMode.ASYNC:
fut = rpc.rpc_async(worker_name(dst), func, args=args)
fut.wait()
else:
self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
rref = rpc.remote(worker_name(dst), func, args=args)
rref.to_here()
# To avoid flakiness, wait for the RRef to be profiled. This
# means that we received the acknowledgement of successful
# creation on the owner and ran the callbacks responsible
# for recording the profiling event.
rref._get_profiling_future().wait()
events = prof.function_events
rpc_event = get_function_event(events, rpc_exec_mode.value)
# verify Node ID for this rpc event.
self.assertEqual(rpc_event.node_id, self.rank)
# Ensure recording of remote events.
remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
self.assertGreaterEqual(len(remote_events), 1)
for remote_event in remote_events:
self.assertEqual(remote_event.node_id, dst)
if use_record_function:
scope_event = get_function_event(events, "foo")
# Since RPC call is within the scope, its CPU interval should be
# contained within foo's interval.
self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
# the sender, dest worker, function run, and type of RPC should all
# be recorded.
self_worker_name = worker_name(self.rank)
dst_worker_name = worker_name(dst)
self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
if use_record_function:
# verify order by ensuring that the outer context comes
# before the rpc event.
foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
self.assertLess(foo_event_ix, rpc_event_idx)
def _run_test_profiler_with_sync_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_sync_rpc_udf(self):
self._run_test_profiler_with_sync_rpc_udf()
@dist_init
def test_profiler_with_sync_rpc_udf_single_threaded(self):
self._run_test_profiler_with_sync_rpc_udf()
def _run_test_profiler_with_sync_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_sync_rpc_builtin(self):
self._run_test_profiler_with_sync_rpc_builtin()
@dist_init
def test_profiler_with_sync_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_sync_rpc_builtin()
def _run_test_profiler_with_async_rpc_udf(self):
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
use_record_function=True)
@dist_init
def test_profiler_with_async_rpc_udf(self):
self._run_test_profiler_with_async_rpc_udf()
@dist_init
def test_profiler_with_async_rpc_udf_single_threaded(self):
self._run_test_profiler_with_async_rpc_udf()
def _run_test_profiler_with_async_rpc_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
@dist_init
def test_profiler_with_async_rpc_builtin(self):
self._run_test_profiler_with_async_rpc_builtin()
@dist_init
def test_profiler_with_async_rpc_builtin_single_threaded(self):
self._run_test_profiler_with_async_rpc_builtin()
def _run_test_profiler_with_remote_udf(self):
self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
)
@dist_init
def test_profiler_with_remote_udf(self):
self._run_test_profiler_with_remote_udf()
@dist_init
def test_profiler_with_remote_udf_single_threaded(self):
self._run_test_profiler_with_remote_udf()
def _run_test_profiler_with_remote_builtin(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
use_record_function=True
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
torch.mul,
args=(torch.ones(1), torch.ones(1)),
dst=self.rank,
)
@dist_init
def test_profiler_with_remote_builtin(self):
self._run_test_profiler_with_remote_builtin()
@dist_init
def test_profiler_with_remote_builtin_single_threaded(self):
self._run_test_profiler_with_remote_builtin()
def _run_test_profiler_with_script_async_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.ASYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_async_rpc(self):
self._run_test_profiler_with_script_async_rpc()
@dist_init
def test_profiler_with_script_async_rpc_single_threaded(self):
self._run_test_profiler_with_script_async_rpc()
def _run_test_profiler_with_script_sync_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.SYNC,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
@dist_init
def test_profiler_with_script_sync_rpc(self):
self._run_test_profiler_with_script_sync_rpc()
@dist_init
def test_profiler_with_script_sync_rpc_single_threaded(self):
self._run_test_profiler_with_script_sync_rpc()
def _run_test_profiler_with_script_remote_rpc(self):
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
)
self._profiler_test_with_rpc(
RPCExecMode.REMOTE,
my_script_func,
args=(torch.tensor(1),),
use_record_function=True,
)
# test remote to self
self._profiler_test_with_rpc(
RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
)
@dist_init
def test_profiler_with_script_remote_rpc(self):
self._run_test_profiler_with_script_remote_rpc()
@dist_init
def test_profiler_with_script_remote_rpc_single_threaded(self):
self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
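"""
Extracts the top-level (non-nested) events from each thread's event
list, based on non-overlapping time ranges, and asserts that their
sorted names match expected_top_level_event_names.
"""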
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
@dist_init
def test_server_process_global_profiler(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
x = torch.tensor(1)
y = torch.tensor(2)
outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
outer_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
inner_profile_rref.rpc_sync().__enter__()
rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
inner_profile_rref.rpc_sync().__exit__(None, None, None)
outer_profile_rref.rpc_sync().__exit__(None, None, None)
inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
expected_inner_events = ['aten::sub']
expected_outer_events = expected_inner_events + ['aten::add']
self._assert_top_level_events(inner_events, expected_inner_events)
outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
self._assert_top_level_events(outer_events, expected_outer_events)
inner_profile_rref.rpc_sync().key_averages()
outer_profile_rref.rpc_sync().key_averages()
@dist_init
def test_async_record_function_double_end_callbacks(self):
num_sleep_seconds = 1
if self.rank == 1:
# Validate that calling the function twice results in an error.
with _profile() as pf:
with torch.autograd.profiler.record_function("foo") as rf:
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
rf._call_end_callbacks_on_future(fut)
with self.assertRaisesRegex(
RuntimeError, "can only be called once."
):
rf._call_end_callbacks_on_future(fut)
fut.wait()
@dist_init
def test_async_record_function_cbs_jit_call(self):
if self.rank == 1:
with _profile() as pf:
key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(my_script_func),
"worker1",
"worker0",
)
with torch.autograd.profiler.record_function(key) as rf:
fut = rpc.rpc_async(
worker_name(0), my_script_func, args=(torch.tensor(1),)
)
# Intentionally calling record_function internals
fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
result = fut.wait()
# Validate that the profiling future returns the same value as the RPC
# future.
expected = torch.add(torch.tensor(1), torch.tensor(1))
self.assertEqual(result, expected)
events = pf.function_events
rpc_event = get_function_event(
events, torch._jit_internal._qualified_name(my_script_func)
)
self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
@dist_init
def test_py_raise_in_user_func(self):
with captured_output() as (_, err):
# This barrier prevents a race condition where the main thread has
# not entered the context manager when the remote function runs.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaisesRegex(ValueError, expected_err):
fut.wait()
# This barrier prevents a race condition where the main thread exits
# context manager before the remote function has run.
dist.barrier()
# Validate that trainers log errors when running functions.
stderr_lines = err.getvalue()
self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _stress_test_rpc(self, f, repeat=1000, args=()):
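"""
Issues `repeat` async RPCs running f on the next rank, checks that every
result equals 0, and prints the elapsed time.
"""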
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
@dist_init
def test_stress_light_rpc(self):
self._stress_test_rpc(light_rpc)
@dist_init
def test_stress_heavy_rpc(self):
self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_stress_heavy_rpc_torchscript(self):
self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
@dist_init
def test_builtin_remote_self(self):
rref = rpc.remote(
worker_name(self.rank),
torch.add,
args=(torch.ones(2, 2), torch.ones(2, 2)),
)
self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
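"""
Creates several remote calls of fn on the next rank with per-call
args/kwargs and checks each returned RRef against a locally computed
result.
"""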
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to run into
# timeout or connection-closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
@dist_init
def test_local_value_not_on_owner(self):
# Ensure that an error is raised if a user tries to call
# local_value() on a non-owning node.
next_rank = (self.rank + 1) % self.world_size
rref = rpc.remote(
worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
with self.assertRaisesRegex(
RuntimeError, (
fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
r"can't call localValue\(\) on user "
fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
)
):
rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
@dist_init
def _test_rref_type(self, blocking):
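"""
Verifies rref._get_type() in blocking and non-blocking modes: the first
call launches an RPC to the owner, repeated calls are served from the
cache (returning the same future when non-blocking), and the result
matches the type of the remote value.
"""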
def launched_rpc(events):
expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
return any([e.name.startswith(expected_name) for e in events])
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
with _profile() as p:
t = rref._get_type(blocking=blocking)
if not blocking:
t = t.wait()
self.assertTrue(launched_rpc(p.function_events))
expected_type = type(torch.ones(2))
self.assertEqual(t, expected_type)
futs = []
def verify(fut):
self.assertEqual(fut.value(), expected_type)
with _profile() as p:
for _ in range(10):
t = rref._get_type(blocking=blocking)
if not blocking:
futs.append(t)
t.add_done_callback(verify)
t = t.wait()
self.assertEqual(t, expected_type)
if not blocking:
# Note that cached calls with blocking=False all return the same
# cached original future.
first_fut = futs[0]
for f in futs[1:]:
self.assertTrue(f is first_fut)
# Ensure we never launch another RPC, other than for the very
# first call.
self.assertFalse(launched_rpc(p.function_events))
self.assertEqual(t, type(torch.ones(2)))
rref = rpc.remote(dst, MyClass, args=(0,))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
# raise_func fails on the owner, so fetching the RRef type should surface the error.
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
def test_rref_type_with_error_blocking(self):
self._test_rref_type_with_error(blocking=True)
def test_rref_type_with_error_non_blocking(self):
self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
def test_rref_type_owner_blocking(self):
self._test_rref_type_owner(blocking=True)
def test_rref_type_owner_non_blocking(self):
self._test_rref_type_owner(blocking=False)
@staticmethod
def _slow_add(x, y):
time.sleep(1)
return x + y
@dist_init
def test_rref_type_slow_init(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
self.assertEqual(rref._get_type(), type(torch.ones(2)))
@dist_init
def test_owner_equality(self):
a = RRef(40)
b = RRef(50)
other_rank = (self.rank + 1) % self.world_size
other_a = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_b = rpc.remote(
worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
)
other_a.to_here() # to ensure clean termination
other_b.to_here()
self.assertNotEqual(a.owner(), 23)
self.assertEqual(other_a.owner(), other_b.owner())
self.assertNotEqual(a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_a.owner())
self.assertEqual(other_a.owner(), other_b.owner())
self.assertEqual(a.owner(), a.owner())
self.assertEqual(a.owner(), b.owner())
self.assertEqual(a.owner(), rpc.get_worker_info())
x = dict()
x[a.owner()] = a
x[other_a.owner()] = other_a
self.assertEqual(x[a.owner()], a)
self.assertEqual(x[b.owner()], a)
self.assertEqual(x[other_a.owner()], other_a)
self.assertEqual(x[other_b.owner()], other_a)
self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init(setup_rpc=True)
def test_call_method_on_rref(self):
"""
Tests that it is possible to call an instance method on a remote object
by using rref.owner() as the destination of the call.
"""
vals = [10, 2, 5, 7]
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# creates a remote object
rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
# modifies state of the remote object
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[1]),
)
rpc.rpc_async(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[2]),
).wait()
rpc.remote(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, vals[3]),
).to_here()
# queries state of the remote object
result = rpc.rpc_sync(
dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
)
self.assertEqual(result, sum(vals))
# Notice `rpc.api.shutdown()` accesses
# `_delete_all_user_and_unforked_owner_rrefs` through
# `torch.distributed.rpc.api`, so patching
# `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
# not help.
@mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
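"""
Creates a UserRRef while RRef deletion is patched out, then shuts down
RPC; a "Leaking RRef" error is expected unless ignore_leak is set.
"""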
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Wait for all init to complete.
dist.barrier()
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.ones(2, 2), 1),
)
import torch.distributed.rpc.api as api
if ignore_leak:
api._ignore_rref_leak = True
rpc.shutdown(graceful=True)
else:
api._ignore_rref_leak = False
with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
rpc.shutdown(graceful=True)
@dist_init(setup_rpc=False)
def test_rref_leak(self):
self._test_rref_leak(ignore_leak=False)
@dist_init(setup_rpc=False)
def test_ignore_rref_leak(self):
self._test_rref_leak(ignore_leak=True)
@dist_init
def test_rref_str(self):
rref1 = RRef(self.rank)
id_class = "GloballyUniqueId"
self.assertEqual(
"OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
)
dst_rank = (self.rank + 1) % self.world_size
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(
rref2.__str__(),
"UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
id_class, self.rank
),
)
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on the remote end
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
@dist_init
def test_rref_context_debug_info(self):
# This test checks local states that are modified by remote workers.
# This means that we would need barrier before and after every check.
# The barrier before the check makes sure that all previous states are
# cleared globally, and the barrier after ensures that no subsequent
# state change leaks into the current check.
initialize_pg(self.file_init_method, self.rank, self.world_size)
# Check 1: local RRef does not update owners_ map or add a pending user.
#################################################
rref1 = RRef(self.rank)
# don't need a barrier here as local RRef is handled by this thread
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertIn("num_pending_users", info)
# RRef on local value is not added to context until shared across RPC
self.assertEqual(0, int(info["num_owner_rrefs"]))
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after the check 1
dist.barrier()
# Check 2: Sharing RRef as an arg should update owners_ map
###########################################################
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
# barrier before check 2
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(1, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 2
dist.barrier()
# clear states for check 2
rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
# Wait for owner rref to be cleared.
while int(info["num_owner_rrefs"]) != 0:
info = _rref_context_get_debug_info()
time.sleep(0.1)
dist.barrier()
# Check 3: rpc.remote call should update owners_ map
####################################################
rref2 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref3 = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
)
rref2.to_here()
rref3.to_here()
# barrier before check 3
wait_until_pending_futures_and_users_flushed()
dist.barrier()
info = _rref_context_get_debug_info()
self.assertIn("num_owner_rrefs", info)
self.assertEqual(2, int(info["num_owner_rrefs"]))
# no pending users since the fork is finished
self.assertEqual(0, int(info["num_pending_users"]))
# barrier after check 3
dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
# test that rpc.enable_gil_profiling(False) will result in
# GIL wait time not being recorded.
# GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
@dist_init(setup_rpc=False)
def test_local_shutdown(self):
# test that we can start RPC and then immediately locally shutdown
# without sending any messages.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_handle_send_exceptions(self):
# test that if a callee node has gone down, we raise an appropriate
# exception instead of just crashing.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc._set_rpc_timeout(10)
# This barrier is needed to ensure that some workers do not exit before
# others have been brought up, for non ProcessGroupAgent backends.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
if self.rank == 1:
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
# allow destination worker to exit without joining
error_str = self.get_shutdown_error_regex()
wait_until_node_failure(dst_rank, error_str)
fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
# Shutdown sequence is not very well defined and as a result
# we can see any of the error messages defined in get_shutdown_error_regex.
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
# exit all workers non-gracefully.
rpc.shutdown(graceful=False)
@dist_init
def test_deadlock(self):
# this test is copied from https://github.com/pytorch/pytorch/issues/45089
if self.rank == 1:
dst1 = worker_name((self.rank + 1) % self.world_size)
x = torch.ones(2)
y = torch.ones(2)
rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
dist_initialized = dist.is_initialized()
if not dist_initialized:
dist.init_process_group(
backend="gloo",
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size,
)
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can time out since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
@dist_init
def test_default_timeout_used(self):
"""
Tests that if no timeout is passed into rpc_async and rpc_sync, then the
default timeout is used.
"""
dst_rank = (self.rank + 1) % self.world_size
rpc._set_rpc_timeout(0.001) # 1 ms
# futures should time out and be marked with an exception indicating the timeout.
futs = [
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
for _ in range(10)
]
expected_error = self.get_timeout_error_regex()
for fut in futs:
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# ensure that if a new timeout is set old futures don't time out but new ones do.
rpc._set_rpc_timeout(200) # 200 seconds
# create a longstanding RPC.
fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
# now, set a short timeout.
rpc._set_rpc_timeout(0.001)
# fut2 should time out, fut1 should not.
fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut2.wait()
fut1.wait()
# Zero timeout means infinity, so future should run to completion.
rpc._set_rpc_timeout(0)
rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
# reset to default timeout so shutdown messages can process cleanly.
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init
def test_rpc_timeouts(self):
# TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
dst_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst_rank)
timeout = 0.1 # 100 ms
expected_error = self.get_timeout_error_regex()
# Test async UDF
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
# Test sync UDF
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
# Ensure run to completion if there is no timeout and we use the default
# RPC timeout.
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# If we set a default timeout for RPCs, it should be respected, though
# still overridden if we pass in a different timeout to the APIs.
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
# The RPCs should run to completion since we override the timeout.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
# Passing in a zero timeout should ensure that the RPC won't time out.
rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError):
with _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
timed_out_rpc_event = None
@staticmethod
def timed_out_rpc():
RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash,
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this node
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(
RuntimeError, "RPC pickler does not serialize"
):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(
dst_worker_name,
torch.add,
args=(torch.ones(n, n), 2)
)
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# pass in graceful=True to ensure that local UserRRefs are deleted.
rpc.shutdown(graceful=True)
with self.assertRaisesRegex(
RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
):
rref.to_here()
with self.assertRaisesRegex(
RuntimeError, "Cannot call fork an UserRRef after deletion."
):
import torch.distributed.rpc.internal as internal
internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
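"""
Creates a UserRRef owned by the worker two ranks after the current one.
"""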
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),))
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError,
"unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_futs = []
for idx in range(num_cbs):
cb_futs.append(fut.then(partial(callback, idx)))
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(
cb_futs[idx].wait(),
torch.ones(n, n) * 2 + idx
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
add_use_future_cb,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(
dst,
torch.add,
args=(fut0.wait(), 1)
).then(lambda fut1: fut1.wait() + 1)
return fut2.wait()
fut3 = rpc.rpc_async(
dst,
torch.add,
args=(torch.ones(2, 2), 1)
).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
TypeError,
"incompatible function arguments."
):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes.
# Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_raise_func,
mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
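    # The async_add* helpers exercised below are decorated for async
    # execution (@rpc.functions.async_execution) on the callee side: they
    # return a torch.futures.Future, and the RPC layer replies with the
    # Future's value once it completes rather than with the Future itself.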
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(
async_add_with_future_ctor,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add,
RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add,
RPCExecMode.REMOTE
)
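    # rref.rpc_sync(), rref.rpc_async() and rref.remote() return proxies that
    # invoke the named method on the RRef's owner, using the object held by
    # the RRef as self; the three modes below only differ in how the result
    # is retrieved.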
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(
async_add_chained_multi,
RPCExecMode.REMOTE
)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.ASYNC
)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(
async_add_multi_fanout,
RPCExecMode.REMOTE
)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError,
"Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
return_future,
mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
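        # wait_all() blocks until every Future in the list completes and
        # re-raises the error of a failed Future, so the ValueError from
        # raise_func is expected to surface here.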
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init(setup_rpc=False)
@sandcastle_skip_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method}
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t1 = torch.rand(10, 10, requires_grad=True)
rref = rpc.RRef(t1.sum() + t1.sum())
rref.backward()
expected_grad = torch.ones_like(t1) * 2
self.assertEqual(expected_grad, t1.grad)
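        # Without a context id, RRef.backward() runs plain local autograd and
        # accumulates into .grad; inside a dist_autograd context the gradients
        # are recorded in the context instead, as checked below.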
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id)
self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
# Double backward.
with dist_autograd.context() as context_id:
t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
rref = rpc.RRef(t2.sum())
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
# Test errors.
with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
rpc.RRef(torch.rand(10)).backward()
with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
rpc.RRef(torch.rand(10, requires_grad=True)).backward()
with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
rref.backward(context_id)
with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
if self.rank != 0:
og_func = rpc.api._broadcast_to_followers
og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
# Monkey-patch _broadcast_to_followers to fail, which would ensure
# _all_gather on leader raises an exception.
def raise_error(sequence_id, objects_map):
og_func(sequence_id, objects_map)
raise RuntimeError('simulation')
# Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
# which would ensure barrier is not called on followers.
def rref_error():
raise RuntimeError('simulation rref')
try:
rpc.api._broadcast_to_followers = raise_error
rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
rpc.shutdown()
finally:
rpc.api._broadcast_to_followers = og_func
rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
else:
with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
rpc.shutdown()
dist.barrier()
class CudaRpcTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(2)
@dist_init
def test_profiler_remote_cuda(self):
if self.rank != 1:
return
dst_cuda_0 = (self.rank + 1) % self.world_size
dst_cuda_1 = (self.rank + 2) % self.world_size
dst_worker_cuda_0 = worker_name(dst_cuda_0)
dst_worker_cuda_1 = worker_name(dst_cuda_1)
with _profile(use_cuda=True) as p:
fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
fut1.wait()
fut2.wait()
def get_name(event):
return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
function_events = p.function_events
for event in function_events:
if event.is_async:
self.assertEqual(0, event.cuda_time_total)
self.assertEqual([], event.kernels)
self.assertEqual(0, event.cuda_time)
else:
if event.node_id == 1:
continue
self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
if get_name(event) in EXPECTED_REMOTE_EVENTS:
self.assertGreater(event.cuda_time_total, 0)
self.assertEqual(1, len(event.kernels))
kernel = event.kernels[0]
if event.node_id == dst_cuda_0:
self.assertEqual(kernel.device, 0)
if event.node_id == dst_cuda_1:
self.assertEqual(kernel.device, 1)
self.assertGreater(event.cuda_time, 0)
# Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
# events.
remote_events = [event for event in function_events if event.is_remote]
remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
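    # The dist_init kwargs used in this class map onto the faulty agent's
    # options: faulty_messages ends up in messages_to_fail (message types
    # whose sends are made to fail), and messages_to_delay maps a message
    # type to an artificial delay in seconds.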
@dist_init(messages_to_delay={})
def test_check_failed_messages(self):
if self.rank == 0:
dst_worker_b = worker_name((self.rank + 1) % self.world_size)
dst_worker_c = worker_name((self.rank + 2) % self.world_size)
# Worker0 sends RPC to Worker1 and creates an RRef there
rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
# Worker0 sends an RPC to Worker2 with the RRef as an arg
rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
# check if the output is as expected
self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
# explicitly delete all User RRefs
_delete_all_user_and_unforked_owner_rrefs()
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
def test_custom_faulty_messages(self):
self.assertEqual(
set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
set(self.rpc_backend_options.messages_to_fail),
)
@dist_init(faulty_messages=[])
def test_no_faulty_messages(self):
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
@dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
def test_custom_messages_to_delay(self):
self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
        # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
        # on the owning nodes; this is expected because the OwnerRRef was never
        # successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
        # 1 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
        # NOTE: we only test this when not sending to self, as to_here() calls
        # localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure run to completion if we specify a timeout of 0 (treated as an infinite timeout)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
        # Ensure run to completion if we specify a timeout of 0 (treated as an
        # infinite timeout), even though the default timeout is still short
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
def test_mismatched_type_for_options(self):
# An exception should be raised if the options are not an instance of
# TensorPipeRpcBackendOptions.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(
TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc_backend_options,
)
def test_infer_backend_from_options(self):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.init_method
)
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_set_and_get_num_worker_threads(self):
NUM_THREADS = 27
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=NUM_THREADS
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
timeout = 0.5
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of a RRef from an owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
):
rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
        # Note that even when we call rref.rpc_async() in this case, we time
        # out during future creation rather than while waiting on the future.
        # This is because the rref proxy function calls rref._get_type before
        # returning the future, and that call blocks until the RRef has been
        # created on the owner node or the specified timeout expires.
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
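    # The artificial delay above is what makes missing CUDA stream
    # synchronization on the caller side show up as mismatched results in the
    # *_synchronization tests below (see the issue linked in those tests).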
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
def _test_device_maps(self, options, errMsg):
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
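    # A device map entry {src: dst} registered via set_device_map() makes the
    # agent place tensors that live on device src locally onto device dst on
    # the target worker before the remote function runs; return values travel
    # back through the inverse mapping (see _test_device_maps_gpu below).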
@staticmethod
def _gpu_add(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
return (x + y).to(0)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
x_device = "cpu" if x.device.type == "cpu" else x.device.index
y_device = "cpu" if y.device.type == "cpu" else y.device.index
if x_device == x_to and y_device == y_to:
return x.to(z_to) + y.to(z_to)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
return (x + y).to(0)
def _test_device_maps_missing_config(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
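    # The helper above follows the usual cross-stream pattern: wait_stream()
    # orders the side stream after the current stream (and back again before
    # returning), while record_stream() tells the caching allocator that the
    # tensors are in use on s1 so their memory is not reclaimed prematurely.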
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
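        # Besides the map towards dst, each rank registers a map towards the
        # rank it receives inputs from: forward(rref, True) calls to_here() on
        # the RRef argument, and that transfer needs a device mapping as well.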
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)], # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
def _test_rref_forward_synchronization(self, local_device, remote_device):
options = self.rpc_backend_options
input_src = worker_name(0)
model_dst = worker_name(1)
out_relay = worker_name(2)
if self.rank == 0:
# for 1) model construction 2) forward execution
options.set_device_map(model_dst, {local_device: remote_device})
# Forward output will be first copied to the relay node before
# returning to the worker. This is intentional, to test RRef
# forward CUDA stream synchronizations.
options.set_device_map(out_relay, {local_device: local_device})
elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward function
            # calls RRef.to_here(), and hence needs to configure the device map
options.set_device_map(input_src, {remote_device: local_device})
elif self.rank == 2:
            # worker2 will get the out RRef, call to_here() on it, and hence
            # needs to configure the device map.
options.set_device_map(model_dst, {local_device: remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 0:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN on MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
rref_out = rref.remote().forward(rref_input, True)
out = rpc.remote(
out_relay,
TensorPipeAgentCudaRpcTest._rref_relay,
args=(rref_out,)
).to_here()
expected = rref.rpc_sync().forward(rref_input, True)
self.assertEqual(out, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
if self.rank == 0:
options = self.rpc_backend_options
options.set_device_map("w0", {local_device: remote_device})
rpc.init_rpc(
"w0",
rank=0,
world_size=1,
rpc_backend_options=options
)
model = rpc.remote(
"w0", torch.nn.Linear, (2048, 20000)
).remote().to(remote_device)
for _ in range(30):
data = torch.rand(2048, 2048).to(local_device)
output = model.rpc_sync().forward(data)
# to_here() internally calls localValue as the caller is
# the owner of the RRef.
v0 = rpc.RRef(output).remote().sum().to_here().item()
v1 = output.sum().item()
self.assertEqual(v0, v1)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
x = torch.ones(1000, 200).cuda(0) * i
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
# serialization of the return value will create a new tensor from the
# view, which is done outside of the user function.
return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
futs = []
for i in range(5):
futs.append(rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._return_tensor_view,
args=(i,)
))
for i in range(5):
self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected source devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0 : 0})
options.set_devices([1])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
with self.assertRaisesRegex(
ValueError,
"Node worker0 has unexpected target devices in its device map for worker1"
):
dst = worker_name((self.rank + 1) % self.world_size)
options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
device_maps={dst: {0 : 1}},
devices=[0]
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
fut = Future(devices=[0])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
fut = Future(devices=["cuda:0"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
fut = Future(devices=[torch.device("cuda", 0)])
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
with self.assertRaisesRegex(
ValueError, "Expected devices to have indices, got cpu"
):
fut = Future(devices=["cpu"])
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
# We check proper CUDA stream synchronization by adding to the tensor
# in one stream to get the expected value, and reading it from another stream.
future = Future(devices=["cuda:0"])
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
another_stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
if sparse_tensor:
tensor = build_sparse_tensor().to("cuda:0")
add_tensor = build_sparse_tensor().to("cuda:0")
expected_tensor = (tensor + add_tensor).coalesce()
else:
tensor = torch.zeros((100,), device="cuda:0")
add_tensor = torch.ones((100,), device="cuda:0")
expected_tensor = tensor + add_tensor
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor += add_tensor
if sparse_tensor:
tensor = tensor.coalesce()
future.set_result(wrapper(tensor))
with torch.cuda.stream(another_stream):
tensor = unwrapper(future.wait())
if sparse_tensor:
self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
self.assertEqual(tensor.size(), expected_tensor.size())
else:
self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
# We check proper CUDA stream synchronization by filling the tensor with
# the expected value in one stream, and reading it from another stream.
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:0", "cuda:1"])
def cb(fut):
t0 = fut.value()
tensor1.copy_(t0, non_blocking=True)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
tensor0 = torch.zeros((100,), device="cuda:0")
tensor1 = torch.zeros((100,), device="cuda:1")
parent_future = Future(devices=["cuda:1"])
# As a plus, we test that futures still invoke callbacks even in case of
# error, and that the child futures are successful if those callbacks
# don't access the parent future.
def cb(fut):
with torch.cuda.device("cuda:1"):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor1.fill_(1)
return tensor1
child_future = parent_future.then(cb)
with torch.cuda.device("cuda:0"):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
tensor0.fill_(1)
parent_future.set_result(tensor0)
with self.assertRaisesRegex(
ValueError,
r"The result contained tensors residing on device\(s\) cuda:0 "
r"which are not among the expected device\(s\) cuda:1",
):
parent_future.wait()
with torch.cuda.device("cuda:1"):
another_stream = torch.cuda.Stream()
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t = torch.zeros((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 1).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
dst = worker_name((self.rank + 1) % self.world_size)
nested_dst = worker_name((self.rank + 2) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
a = torch.ones((100,), device="cuda:0")
b = torch.ones((100,), device="cuda:0")
c = torch.ones((100,), device="cuda:0")
fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
another_stream = torch.cuda.Stream("cuda:0")
with torch.cuda.stream(another_stream):
self.assertTrue(torch.eq(fut.wait(), 3).all().item())
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
tensor = torch.zeros((100,), device="cuda:0")
future = Future(devices=["cuda:0"])
future.set_result(tensor)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
tensor.fill_(1)
future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
tensor_list = [torch.zeros((100,), device="cuda:0")]
future = Future(devices=["cuda:0"])
future.set_result(tensor_list)
# It's weird to modify the value of a future once it's complete, but
# technically possible. Currently this is considered undefined behavior
# (in practice the future will ignore the modification and still
# synchronize with the original value). We could one day add logic to
# detect and warn or throw in such cases, but for now we just check that
# this doesn't crash.
# We set things up so that the original tensor contained in the list
# gets deleted once we replace it with the other one. This will
# invalidate any cached information held by the future.
tensor_list[0] = torch.ones((100,), device="cuda:0")
future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {"cuda:0": "cuda:0"})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
rref.rpc_sync().increase(1)
ret = rref.rpc_sync().sum()
self.assertEqual(ret, 42)
rpc.shutdown()
|
main.py
|
import pickle
from json import dumps
from time import sleep
import os
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from threading import Thread
import logging
import sys
import socket
import errno
from kivy import Logger
from _thread import interrupt_main
# --------Binary File Checker----------#
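# A sample is treated as binary if it contains any byte outside the whitelist
# below (common control characters plus 0x20-0xFF, excluding DEL).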
text_chars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7f})
is_binary = lambda byte: bool(byte.translate(None, text_chars))
# --------Binary File Checker----------#
class KivyFileListener(FileSystemEventHandler):
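    """Watchdog handler that sends edited project files to the live-reload server.

    The file path is remembered in on_created and the updated contents are sent
    from on_modified; changes to main.py itself are ignored.
    """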
def __init__(self):
self.client_socket = KivyLiveClient()
self.filepath = ""
def on_any_event(self, event):
pass
def on_modified(self, event):
filename = os.path.basename(self.filepath).strip("~")
if filename == "main.py" or not self.filepath:
return
        # Probe the first 1 KiB to decide whether the file should be read as binary.
        with open(self.filepath, "rb") as probe:
            binary = is_binary(probe.read(1024))
        with open(self.filepath, "rb" if binary else "r") as file:
code_data = pickle.dumps({"file": os.path.relpath(self.filepath), "code": file.read()})
self.client_socket.send_code(
f"{len(code_data):<{self.client_socket.HEADER_LENGTH}}".encode("utf-8") + code_data
)
def on_created(self, event):
self.filepath = event.src_path.strip("~")
def on_closed(self, event):
pass
def on_moved(self, event):
pass
def on_deleted(self, event):
pass
class KivyLiveClient:
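    """Client side of the live-reload server connection.

    Messages are framed as a fixed-width ASCII length header (HEADER_LENGTH
    bytes) followed by the payload. A background thread receives the initial
    project snapshot and then applies streamed file updates.
    """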
def __init__(self, **kwargs):
self.HEADER_LENGTH = 64
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect(("0.0.0.0", 6051))
# self.client_socket.setblocking(False)
Thread(target=self.recv_code).start()
def send_code(self, code_data):
        # Use sendall() so the entire length-prefixed message is written.
        self.client_socket.sendall(code_data)
def recv_code(self):
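        """Receive the initial project snapshot, then apply streamed updates.

        The first message maps relative file paths to their contents; every
        later message describes a single changed file (see update_code).
        """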
_header = self.client_socket.recv(self.HEADER_LENGTH)
load_initial_code = pickle.loads(self.client_socket.recv(int(_header)))
for i in load_initial_code:
file_path = os.path.split(i)[0]
try:
os.makedirs(file_path)
except FileExistsError as e:
Logger.debug(f"{e} : Ignore this")
if os.path.split(i)[1] == "main.py":
with open(
os.path.join(file_path, "liveappmain.py"),
"wb" if type(load_initial_code[i]) == bytes else "w"
) as f:
f.write(load_initial_code[i])
else:
with open(
os.path.join(file_path, os.path.split(i)[1]),
"wb" if type(load_initial_code[i]) == bytes else "w"
) as f:
f.write(load_initial_code[i])
try:
while True:
header = self.client_socket.recv(self.HEADER_LENGTH)
if not len(header):
Logger.info("SERVER DOWN: Shutting down the connection")
break
                message_length = int(header)
                # update_code() expects a dict; the server's update messages are
                # assumed to be pickled like the initial snapshot above, so
                # unpickle the payload instead of decoding it as text.
                code_data = pickle.loads(self.client_socket.recv(message_length))
                self.update_code(code_data)
                # Signal the main thread so the running app can pick up the change.
                interrupt_main()
except KeyboardInterrupt:
pass
except:
Logger.info("SERVER DOWN: Shutting down the connection")
interrupt_main()
@staticmethod
def update_code(code_data):
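        """Write an updated file to disk under the watched directory.

        main.py from the watched project is saved as liveappmain.py; every
        other file keeps its relative path under `path`.
        """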
# write code
global path
file = code_data["data"]["file"]
if os.path.abspath(".") == os.getcwd():
with open(os.path.join(path, file) if file != "main.py" else "liveappmain.py", "w") as f:
f.write(code_data["data"]["code"])
else:
with open(os.path.join(path, file), "w") as f:
f.write(code_data["data"]["code"])
Logger.info(f"FILE UPDATE: {file} was updated by {code_data['address']}")
if __name__ == "__main__":
try:
path = sys.argv[1]
    except IndexError:
        logging.error("add a directory to watch, e.g.: python main.py /path/to/project")
        sys.exit(1)
observer = Observer()
observer.schedule(KivyFileListener(), path=path, recursive=True)
observer.start()
try:
while True:
sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
test_hdfs3.py
|
from __future__ import unicode_literals
import io
import multiprocessing
import os
import posixpath
import tempfile
import sys
from random import randint
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread
import traceback
import pytest
from hdfs3 import HDFileSystem, lib
from hdfs3.utils import ensure_bytes, ensure_string
from hdfs3.conf import conf_to_dict
from hdfs3.compatibility import bytes, unicode, ConnectionError
from hdfs3.utils import tmpfile
test_host = 'localhost'
test_port = 8020
@pytest.yield_fixture
def hdfs():
hdfs = HDFileSystem(host=test_host, port=test_port,
pars={'rpc.client.connect.retry': '2'})
if hdfs.exists('/tmp/test'):
hdfs.rm('/tmp/test')
hdfs.mkdir('/tmp/test')
yield hdfs
if hdfs.exists('/tmp/test'):
hdfs.rm('/tmp/test', recursive=True)
hdfs.disconnect()
a = '/tmp/test/a'
b = '/tmp/test/b'
c = '/tmp/test/c'
d = '/tmp/test/d'
def test_simple(hdfs):
data = b'a' * (10 * 2**20)
with hdfs.open(a, 'wb', replication=1) as f:
f.write(data)
with hdfs.open(a, 'rb') as f:
out = f.read(len(data))
assert len(data) == len(out)
assert out == data
@pytest.mark.slow
def test_connection_error():
with pytest.raises(ConnectionError) as ctx:
hdfs = HDFileSystem(host='localhost', port=9999, connect=False,
pars={'rpc.client.connect.retry': '1'})
hdfs.CONNECT_RETRIES = 1
hdfs.connect()
# error message is long and with java exceptions, so here we just check
# that important part of error is present
msg = 'Caused by: HdfsNetworkConnectException: Connect to "localhost:9999"'
assert msg in str(ctx.value)
def test_idempotent_connect(hdfs):
hdfs.connect()
hdfs.connect()
def test_ls_touch(hdfs):
assert not hdfs.ls('/tmp/test')
hdfs.touch(a)
hdfs.touch(b)
L = hdfs.ls('/tmp/test', True)
assert set(d['name'] for d in L) == set([a, b])
L = hdfs.ls('/tmp/test', False)
assert set(L) == set([a, b])
def test_rm(hdfs):
assert not hdfs.exists(a)
hdfs.touch(a)
assert hdfs.exists(a)
hdfs.rm(a)
assert not hdfs.exists(a)
def test_pickle(hdfs):
data = b'a' * (10 * 2**20)
with hdfs.open(a, 'wb', replication=1) as f:
f.write(data)
assert hdfs._handle
import pickle
hdfs2 = pickle.loads(pickle.dumps(hdfs))
assert hdfs2._handle
hdfs2.touch(b)
hdfs2.ls(b)
with hdfs2.open(c, 'wb', replication=1) as f:
f.write(data)
assert f._handle
with hdfs2.open(c, 'rb') as f:
f.seek(5)
f.read(10)
assert f._handle
with hdfs.open(d, 'wb', replication=1) as f:
f.write(data)
assert f._handle
def test_seek(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123')
with hdfs.open(a) as f:
with pytest.raises(ValueError):
f.seek(1000)
with pytest.raises(ValueError):
f.seek(-1)
with pytest.raises(ValueError):
f.seek(-5, 2)
with pytest.raises(ValueError):
f.seek(0, 10)
f.seek(0)
assert f.read(1) == b'1'
f.seek(0)
assert f.read(1) == b'1'
f.seek(3)
assert f.read(1) == b''
f.seek(-1, 2)
assert f.read(1) == b'3'
f.seek(-1, 1)
f.seek(-1, 1)
assert f.read(1) == b'2'
for i in range(4):
assert f.seek(i) == i
def test_libload():
assert lib.hdfsGetLastError()
assert len(lib.hdfsGetLastError.__doc__) > 0
assert lib.hdfsFileIsOpenForRead(lib.hdfsFile()) is False
def test_bad_open(hdfs):
with pytest.raises(IOError):
hdfs.open('')
@pytest.mark.xfail
def test_write_blocksize(hdfs):
with hdfs.open(a, 'wb', block_size=10) as f:
f.write(b'a' * 25)
blocks = hdfs.get_block_locations(a)
assert len(blocks) == 3
assert blocks[0]['length'] == 10
assert blocks[1]['length'] == 10
assert blocks[2]['length'] == 5
with pytest.raises(ValueError):
hdfs.open(a, 'rb', block_size=123)
@pytest.mark.slow
def test_write_vbig(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b' ' * 2**31)
assert hdfs.info(a)['size'] == 2**31
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b' ' * (2**31 + 1))
assert hdfs.info(a)['size'] == 2**31 + 1
def test_replication(hdfs):
path = '/tmp/test/afile'
hdfs.open(path, 'wb', replication=0).close()
assert hdfs.info(path)['replication'] > 0
hdfs.open(path, 'wb', replication=1).close()
assert hdfs.info(path)['replication'] == 1
hdfs.open(path, 'wb', replication=2).close()
assert hdfs.info(path)['replication'] == 2
hdfs.set_replication(path, 3)
assert hdfs.info(path)['replication'] == 3
with pytest.raises(ValueError):
hdfs.set_replication(path, -1)
with pytest.raises(IOError):
hdfs.open(path, 'wb', replication=-1).close()
def test_errors(hdfs):
with pytest.raises((IOError, OSError)):
hdfs.open('/tmp/test/shfoshf', 'rb')
with pytest.raises((IOError, OSError)):
hdfs.touch('/tmp/test/shfoshf/x')
with pytest.raises((IOError, OSError)):
hdfs.rm('/tmp/test/shfoshf/x')
with pytest.raises((IOError, OSError)):
hdfs.mv('/tmp/test/shfoshf/x', '/tmp/test/shfoshf/y')
with pytest.raises((IOError, OSError)):
hdfs.open('/nonexistent/x', 'wb')
with pytest.raises((IOError, OSError)):
hdfs.open('/nonexistent/x', 'rb')
with pytest.raises(IOError):
hdfs.chown('/unknown', 'someone', 'group')
with pytest.raises(IOError):
hdfs.chmod('/unknonwn', 'rb')
with pytest.raises(IOError):
hdfs.rm('/unknown')
def test_makedirs(hdfs):
hdfs.makedirs('/tmp/test/a/b/c/d/e')
hdfs.info('/tmp/test/a/b/c/d/e')
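# Directory layout shared by test_glob and test_walk: each key maps a directory
# to a (subdirectories, files) tuple.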
tree = {'/tmp/test': (['c'], ['a', 'a1', 'a2', 'a3', 'b1']),
'/tmp/test/c': (['d'], ['x1', 'x2']),
'/tmp/test/c/d': ([], ['x3'])}
def test_glob(hdfs):
hdfs.mkdir('/tmp/test/c/')
hdfs.mkdir('/tmp/test/c/d/')
for fn in (posixpath.join(dirname, f)
for (dirname, (_, fils)) in tree.items()
for f in fils):
hdfs.touch(fn)
assert set(hdfs.glob('/tmp/test/a*')) == {'/tmp/test/a',
'/tmp/test/a1',
'/tmp/test/a2',
'/tmp/test/a3'}
assert set(hdfs.glob('/tmp/test/c/*')) == {'/tmp/test/c/x1',
'/tmp/test/c/x2',
'/tmp/test/c/d'}
assert (set(hdfs.glob('/tmp/test/c')) ==
set(hdfs.glob('/tmp/test/c/')) ==
set(hdfs.glob('/tmp/test/c/*')))
assert set(hdfs.glob('/tmp/test/a')) == {'/tmp/test/a'}
assert set(hdfs.glob('/tmp/test/a1')) == {'/tmp/test/a1'}
assert set(hdfs.glob('/tmp/test/*')) == {'/tmp/test/a',
'/tmp/test/a1',
'/tmp/test/a2',
'/tmp/test/a3',
'/tmp/test/b1',
'/tmp/test/c'}
def test_walk(hdfs):
hdfs.mkdir('/tmp/test/c/')
hdfs.mkdir('/tmp/test/c/d/')
for fn in (posixpath.join(dirname, f)
for (dirname, (_, fils)) in tree.items()
for f in fils):
hdfs.touch(fn)
def check(path, skipped):
seen = set()
for dirname, dirs, fils in hdfs.walk(path):
seen.add(dirname)
sol_dirs, sol_fils = tree[dirname]
assert set(dirs) == set(sol_dirs)
assert set(fils) == set(sol_fils)
assert seen == (set(tree) - skipped)
check('/tmp/test', {'/tmp'})
check('/tmp/test/c', {'/tmp', '/tmp/test'})
check('/tmp/test/c/d', {'/tmp', '/tmp/test', '/tmp/test/c'})
def test_info(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write('a' * 5)
info = hdfs.info(a)
assert info['size'] == 5
assert info['name'] == a
assert info['kind'] == 'file'
assert info['replication'] == 1
assert hdfs.info('/')['kind'] == 'directory'
def test_isdir_isfile(hdfs):
fn = '/tmp/test/a'
dir = '/tmp/test'
missing = '/tmp/not_a_real_path'
with hdfs.open(fn, 'wb', replication=1) as f:
f.write('a' * 5)
# isdir
assert hdfs.isdir(dir)
assert not hdfs.isdir(fn)
assert not hdfs.isdir(missing)
# isfile
assert hdfs.isfile(fn)
assert not hdfs.isfile(dir)
assert not hdfs.isfile(missing)
def test_df(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write('a' * 10)
with hdfs.open(b, 'wb', replication=1) as f:
f.write('a' * 10)
result = hdfs.df()
assert result['capacity'] > result['used']
def test_move(hdfs):
hdfs.touch(a)
assert hdfs.exists(a)
assert not hdfs.exists(b)
hdfs.mv(a, b)
assert not hdfs.exists(a)
assert hdfs.exists(b)
@pytest.mark.xfail
def test_copy(hdfs):
hdfs.touch(a)
assert hdfs.exists(a)
assert not hdfs.exists(b)
hdfs.cp(a, b)
assert hdfs.exists(a)
assert hdfs.exists(b)
def test_exists(hdfs):
assert not hdfs.exists(a)
hdfs.touch(a)
assert hdfs.exists(a)
hdfs.rm(a)
assert not hdfs.exists(a)
def test_cat(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'0123456789')
assert hdfs.cat(a) == b'0123456789'
with pytest.raises(IOError):
hdfs.cat(b)
def test_full_read(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'0123456789')
with hdfs.open(a, 'rb') as f:
assert len(f.read(4)) == 4
assert len(f.read(4)) == 4
assert len(f.read(4)) == 2
with hdfs.open(a, 'rb') as f:
assert len(f.read()) == 10
with hdfs.open(a, 'rb') as f:
assert f.tell() == 0
f.seek(3)
assert f.read(4) == b'3456'
assert f.tell() == 7
assert f.read(4) == b'789'
assert f.tell() == 10
def test_tail_head(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'0123456789')
assert hdfs.tail(a, 3) == b'789'
assert hdfs.head(a, 3) == b'012'
assert hdfs.tail(a, 100) == b'0123456789'
@pytest.yield_fixture
def conffile():
fd, fname = tempfile.mkstemp()
    with open(fname, 'w') as f:
        f.write("""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.permissions.superusergroup</name>
<value>hadoop</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/mnt/data/dfs/nn</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.block.size</name>
<value>134217728</value>
</property>
<property>
<name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
</configuration>
""")
yield fname
if os.path.exists(fname):
os.unlink(fname)
def test_conf(conffile):
should = {'dfs.block.size': '134217728',
'dfs.datanode.hdfs-blocks-metadata.enabled': 'true',
'dfs.namenode.name.dir': '/mnt/data/dfs/nn',
'dfs.permissions': 'false',
'dfs.permissions.superusergroup': 'hadoop',
'dfs.replication': '3'}
assert conf_to_dict(conffile) == should
def test_read_delimited_block(hdfs):
fn = '/tmp/test/a'
delimiter = b'\n'
data = delimiter.join([b'123', b'456', b'789'])
with hdfs.open(fn, 'wb', replication=1) as f:
f.write(data)
assert hdfs.read_block(fn, 1, 2) == b'23'
assert hdfs.read_block(fn, 0, 1, delimiter=b'\n') == b'123\n'
assert hdfs.read_block(fn, 0, 2, delimiter=b'\n') == b'123\n'
assert hdfs.read_block(fn, 0, 3, delimiter=b'\n') == b'123\n'
assert hdfs.read_block(fn, 0, 5, delimiter=b'\n') == b'123\n456\n'
assert hdfs.read_block(fn, 0, 8, delimiter=b'\n') == b'123\n456\n789'
assert hdfs.read_block(fn, 0, 100, delimiter=b'\n') == b'123\n456\n789'
assert hdfs.read_block(fn, 1, 1, delimiter=b'\n') == b''
assert hdfs.read_block(fn, 1, 5, delimiter=b'\n') == b'456\n'
assert hdfs.read_block(fn, 1, 8, delimiter=b'\n') == b'456\n789'
for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)],
[(0, 4), (4, 4), (8, 4)]]:
out = [hdfs.read_block(fn, o, l, b'\n') for o, l in ols]
assert b''.join(filter(None, out)) == data
@pytest.mark.parametrize(['lineterminator'], [(b'\n',), (b'--',)])
def test_readline(hdfs, lineterminator):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(lineterminator.join([b'123', b'456', b'789']))
with hdfs.open(a) as f:
assert f.readline(lineterminator=lineterminator) == b'123' + lineterminator
assert f.readline(lineterminator=lineterminator) == b'456' + lineterminator
assert f.readline(lineterminator=lineterminator) == b'789'
assert f.readline(lineterminator=lineterminator) == b''
@pytest.mark.parametrize(['lineterminator'], [(b'\n',), (b'--',)])
def test_mixed_readline(hdfs, lineterminator):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(lineterminator.join([b'123', b'456', b'789']))
with hdfs.open(a) as f:
assert f.read(1) == b'1'
assert f.readline(lineterminator=lineterminator) == b'23' + lineterminator
assert f.read(1) == b'4'
assert f.readline(lineterminator=lineterminator) == b'56' + lineterminator
def read_write(hdfs, q, i):
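    """Worker body for test_stress_embarrassing: write a file, read it back,
    and report any exception (or None on success) through the queue q."""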
try:
hdfs.df()
hdfs.du('/', True, True)
data = b'0' * 10000
fn = '/tmp/test/foo%d' % i
with hdfs.open(fn, 'wb', replication=1) as f:
f.write(data)
with hdfs.open(fn, 'rb') as f:
data2 = f.read()
assert data == data2
except BaseException as e:
traceback.print_exc()
q.put(str(e))
else:
q.put(None)
@pytest.mark.skipif(sys.version_info < (3, 4), reason='No spawn')
def test_stress_embarrassing(hdfs):
if sys.version_info < (3, 4):
return
ctx = multiprocessing.get_context('spawn')
for proc, queue in [(Thread, Queue), (ctx.Process, ctx.Queue)]:
q = queue()
threads = [proc(target=read_write, args=(hdfs, q, i)) for
i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
while not q.empty():
error = q.get()
if error:
raise AssertionError("error in child: " + error)
def read_random_block(hdfs, fn, n, delim):
for i in range(10):
        hdfs.read_block(fn, randint(0, n // 2), randint(n // 2, n), delim)
@pytest.mark.skipif(sys.version_info < (3, 4), reason='No spawn')
def test_stress_read_block(hdfs):
ctx = multiprocessing.get_context('spawn')
data = b'hello, world!\n' * 10000
for T in (Thread, ctx.Process,):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(data)
threads = [T(target=read_random_block, args=(hdfs, a, len(data), b'\n'))
for i in range(4)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
def test_different_handles():
a = HDFileSystem(host=test_host, port=test_port)
b = HDFileSystem(host=test_host, port=test_port)
assert a._handle.contents.filesystem != b._handle.contents.filesystem
def handle(q):
hdfs = HDFileSystem(host=test_host, port=test_port)
q.put(hdfs._handle.contents.filesystem)
@pytest.mark.skipif(sys.version_info < (3, 4), reason='No spawn')
def test_different_handles_in_processes():
ctx = multiprocessing.get_context('spawn')
hdfs = HDFileSystem(host=test_host, port=test_port)
q = ctx.Queue()
n = 20
procs = [ctx.Process(target=handle, args=(q,)) for i in range(n)]
for p in procs:
p.daemon = True
p.start()
s = {q.get() for i in range(n)}
assert not any(i == hdfs._handle.contents.filesystem for i in s)
for p in procs:
p.join()
@pytest.mark.skipif(not hasattr(os, 'fork'), reason='No fork()')
def test_warn_on_fork():
hdfs = HDFileSystem(host=test_host, port=test_port)
hdfs.disconnect()
pid = os.fork()
if not pid:
# In child
try:
with pytest.warns(RuntimeWarning) as record:
hdfs = HDFileSystem(host=test_host, port=test_port)
assert len(record) == 1
assert ("Attempting to re-use hdfs3 in child process"
in str(record[0].message))
except BaseException:
print("\n------ Child exception -------")
traceback.print_exc()
print("------------------------------")
os._exit(1)
else:
os._exit(0)
retpid, status = os.waitpid(pid, 0)
assert retpid == pid
if status:
pytest.fail("child raised exception")
def test_ensure():
assert isinstance(ensure_bytes(''), bytes)
assert isinstance(ensure_bytes(b''), bytes)
assert isinstance(ensure_string(''), unicode)
assert isinstance(ensure_string(b''), unicode)
def test_touch_exists(hdfs):
hdfs.touch(a)
assert hdfs.exists(a)
def test_write_in_read_mode(hdfs):
hdfs.touch(a)
with hdfs.open(a, 'rb') as f:
with pytest.raises(IOError):
f.write(b'123')
def test_readlines(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123\n456')
with hdfs.open(a, 'rb') as f:
lines = f.readlines()
assert lines == [b'123\n', b'456']
with hdfs.open(a, 'rb') as f:
assert list(f) == lines
with hdfs.open(a, 'wb', replication=1) as f:
with pytest.raises(IOError):
f.read()
# too slow
# bigdata = [b'fe', b'fi', b'fo'] * 32000
# with hdfs.open(a, 'wb', replication=1) as f:
# f.write(b'\n'.join(bigdata))
# with hdfs.open(a, 'rb') as f:
# lines = list(f)
# assert all(l in [b'fe\n', b'fi\n', b'fo', b'fo\n'] for l in lines)
def test_readinto(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123\n456')
with hdfs.open(a, 'rb') as f:
buf = bytearray(10)
assert f.readinto(length=7, out=buf) == 7
assert memoryview(buf).tobytes() == b'123\n456\x00\x00\x00'
with hdfs.open(a, 'rb') as f:
buf = bytearray(10)
assert f.readinto(length=7, out=buf) == 7
assert f.readinto(length=1, out=buf) == 0
with hdfs.open(a, 'wb', replication=1) as f:
buf = bytearray(10)
with pytest.raises(IOError):
f.readinto(length=1, out=buf)
def test_read_with_out_buffer(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123\n456')
with hdfs.open(a, 'rb') as f:
buf = bytearray(10)
retbuf = f.read(length=7, out_buffer=buf)
assert memoryview(buf).tobytes() == b'123\n456\x00\x00\x00'
assert retbuf.tobytes() == b'123\n456'
# explicitly test None and True for out_buffer:
with hdfs.open(a, 'rb') as f:
retbytes = f.read(length=7, out_buffer=None)
assert retbytes == b'123\n456'
with hdfs.open(a, 'rb') as f:
retbuf = f.read(length=7, out_buffer=True)
assert retbuf.tobytes() == b'123\n456'
# too large read is ok if the file is smaller than the buffer:
with hdfs.open(a, 'rb') as f:
buf = bytearray(10)
retbuf = f.read(length=11, out_buffer=buf)
assert memoryview(buf).tobytes() == b'123\n456\x00\x00\x00'
assert retbuf.tobytes() == b'123\n456'
# buffer too small:
with hdfs.open(a, 'rb') as f:
buf = bytearray(6)
with pytest.raises(IOError):
retbuf = f.read(length=7, out_buffer=buf)
# file not open for reading:
with hdfs.open(a, 'wb', replication=1) as f:
buf = bytearray(10)
with pytest.raises(IOError):
f.read(length=1, out_buffer=buf)
def test_put(hdfs):
data = b'1234567890' * 10000
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(data)
hdfs.put(fn, a)
assert hdfs.cat(a) == data
def test_getmerge(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123')
with hdfs.open(b, 'wb', replication=1) as f:
f.write(b'456')
with tmpfile() as fn:
hdfs.getmerge('/tmp/test', fn)
with open(fn, 'rb') as f:
data = f.read()
assert data == b'123456'
def test_get(hdfs):
data = b'1234567890'
with tmpfile() as fn:
with hdfs.open(a, 'wb', replication=1) as f:
f.write(data)
hdfs.get(a, fn)
with open(fn, 'rb') as f:
data2 = f.read()
assert data2 == data
with pytest.raises(IOError):
hdfs.get(b, fn)
def test_open_errors(hdfs):
hdfs.touch(a)
with pytest.raises(ValueError):
hdfs.open(a, 'rb', block_size=1000)
hdfs.disconnect()
with pytest.raises(IOError):
hdfs.open(a, 'wb', replication=1)
def test_du(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123')
with hdfs.open(b, 'wb', replication=1) as f:
f.write(b'4567')
assert hdfs.du('/tmp/test') == {a: 3, b: 4}
assert hdfs.du('/tmp/test/', total=True) == {'/tmp/test/': 3 + 4}
def test_get_block_locations(hdfs):
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123')
locs = hdfs.get_block_locations(a)
assert len(locs) == 1
assert locs[0]['length'] == 3
def test_chmod(hdfs):
hdfs.touch(a)
assert hdfs.ls(a, True)[0]['permissions'] == 0o777
hdfs.chmod(a, 0o500)
assert hdfs.ls(a, True)[0]['permissions'] == 0o500
hdfs.chmod(a, 0o100)
with pytest.raises(IOError):
hdfs.open(a, 'ab')
@pytest.mark.xfail
def test_chown(hdfs):
hdfs.touch(a)
hdfs.info(a)
hdfs.chown(a, 'root', 'supergroup')
def test_text_bytes(hdfs):
with pytest.raises(NotImplementedError):
hdfs.open(a, 'wt')
with pytest.raises(NotImplementedError):
hdfs.open(a, 'rt')
try:
hdfs.open(a, 'r')
except NotImplementedError as e:
assert 'rb' in str(e)
try:
hdfs.open(a, 'w')
except NotImplementedError as e:
assert 'wb' in str(e)
try:
hdfs.open(a, 'a')
except NotImplementedError as e:
assert 'ab' in str(e)
with hdfs.open(a, 'wb', replication=1) as f:
f.write(b'123')
with hdfs.open(a, 'rb') as f:
b = f.read()
assert b == b'123'
def test_open_deep_file(hdfs):
with pytest.raises(IOError) as ctx:
hdfs.open('/tmp/test/a/b/c/d/e/f', 'wb')
msg = "Could not open file: /tmp/test/a/b/c/d/e/f, mode: wb " \
"Parent directory doesn't exist"
assert msg in str(ctx.value)
def test_append(hdfs):
with hdfs.open(a, mode='ab', replication=1) as f:
f.write(b'123')
with hdfs.open(a, mode='ab', replication=1) as f:
f.write(b'456')
with hdfs.open(a, mode='rb') as f:
assert f.read() == b'123456'
with hdfs.open(a, mode='ab', replication=1) as f:
f.write(b'789')
with hdfs.open(a, mode='rb') as f:
assert f.read() == b'123456789'
with pytest.raises(IOError):
with hdfs.open(b, mode='ab', replication=2) as f:
f.write(b'123')
with hdfs.open(b, mode='ab', replication=2) as f:
f.write(b'456')
def test_write_empty(hdfs):
with hdfs.open(a, mode='wb', replication=1) as f:
f.write(b'')
with hdfs.open(a, mode='rb') as f:
assert f.read() == b''
def test_gzip(hdfs):
import gzip
data = b'name,amount\nAlice,100\nBob,200'
with hdfs.open(a, mode='wb', replication=1) as f:
with gzip.GzipFile(fileobj=f) as g:
g.write(b'name,amount\nAlice,100\nBob,200')
with hdfs.open(a) as f:
with gzip.GzipFile(fileobj=f) as g:
bytes = g.read()
assert bytes == data
def test_fooable(hdfs):
hdfs.touch(a)
with hdfs.open(a, mode='rb', replication=1) as f:
assert f.readable()
assert f.seekable()
assert not f.writable()
with hdfs.open(a, mode='wb', replication=1) as f:
assert not f.readable()
assert not f.seekable()
assert f.writable()
def test_closed(hdfs):
hdfs.touch(a)
f = hdfs.open(a, mode='rb')
assert not f.closed
f.close()
assert f.closed
def test_TextIOWrapper(hdfs):
with hdfs.open(a, mode='wb', replication=1) as f:
f.write(b'1,2\n3,4\n5,6')
with hdfs.open(a, mode='rb') as f:
ff = io.TextIOWrapper(f)
data = list(ff)
assert data == ['1,2\n', '3,4\n', '5,6']
def test_array(hdfs):
from array import array
data = array('B', [65] * 1000)
with hdfs.open(a, 'wb', replication=1) as f:
f.write(data)
with hdfs.open(a, 'rb') as f:
out = f.read()
assert out == b'A' * 1000
def test_next(hdfs):
data = b'Sometimes you eat the bear\nAnd sometimes, well\nHe eats you'
splitted_lines = data.split(b'\n')
with hdfs.open(a, 'wb', replication=1) as f:
f.write(data)
with hdfs.open(a, 'rb') as f:
for splitted_line in splitted_lines:
assert splitted_line == next(f).strip()
with pytest.raises(StopIteration):
next(f)
def test_concat(hdfs):
out = b''
for fn, data in zip([a, b, c, d], [b'a', b'b', b'c', b'd']):
with hdfs.open(fn, 'wb', block_size=1048576, replication=1) as f:
f.write(data * 1048576)
out += data * 1048576
if fn == d:
# last block can be non-full
f.write(b'extra')
out += b'extra'
hdfs.concat(a, [b, c, d])
data = hdfs.cat(a)
assert out == data
|
lisp-itr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
from future import standard_library
standard_library . install_aliases ( )
from builtins import str
from builtins import range
import lisp
import lispconfig
import socket
import select
import threading
import time
import os
from subprocess import getoutput
import struct
try :
import pcappy
except :
pass
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
import pcapy
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
Oo0o = [ None , None , None ]
OOO0o0o = None
Ii1iI = None
Oo = None
I1Ii11I1Ii1i = None
Ooo = lisp . lisp_get_ephemeral_port ( )
o0oOoO00o = lisp . lisp_get_ephemeral_port ( )
i1 = None
oOOoo00O0O = None
i1111 = None
i11 = None
if 41 - 41: I1Ii111 . ooOoO0o * IiII % i11iIiiIii
if 74 - 74: iII111i * IiII
if 82 - 82: iIii1I11I1II1 % IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
iiIIIIi1i1 = False
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
IIIiI11ii = threading . Lock ( )
if 52 - 52: iII111i + OOooOOo % OoooooooOO / i11iIiiIii
if 25 - 25: O0 * oO0o + OoooooooOO
if 70 - 70: OOooOOo / Ii1I . Ii1I
if 11 - 11: ooOoO0o / O0 - i1IIi
if 85 - 85: OOooOOo % I1ii11iIi11i * ooOoO0o
if 90 - 90: o0oOOo0O0Ooo % o0oOOo0O0Ooo % I11i * OoOoOO00
if 26 - 26: Ii1I - o0oOOo0O0Ooo
if 63 - 63: II111iiii . II111iiii
def Ii1 ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "ITR" , [ ] ) )
if 71 - 71: OoO0O00
if 55 - 55: OoO0O00 / I1ii11iIi11i * OOooOOo
if 86 - 86: i11iIiiIii + Ii1I + ooOoO0o * I11i + o0oOOo0O0Ooo
if 61 - 61: OoO0O00 / i11iIiiIii
if 34 - 34: OoooooooOO + iIii1I11I1II1 + i11iIiiIii - I1ii11iIi11i + i11iIiiIii
if 65 - 65: OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
def oo ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "ITR" ) )
if 54 - 54: OOooOOo + OOooOOo % I1Ii111 % i11iIiiIii / iIii1I11I1II1 . OOooOOo
if 57 - 57: Ii1I % OoooooooOO
if 61 - 61: iII111i . iIii1I11I1II1 * I1IiiI . ooOoO0o % Oo0Ooo
if 72 - 72: OOooOOo
if 63 - 63: Ii1I
if 86 - 86: ooOoO0o . I1IiiI % Oo0Ooo + o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1 % oO0o * I11i % I11i + II111iiii * iII111i
if 54 - 54: I11i + IiII / iII111i
def IIII ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "ITR" ) )
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
def OooOooooOOoo0 ( lisp_sockets , lisp_ephem_port ) :
lisp . lisp_set_exception ( )
if 71 - 71: I1Ii111 % oO0o % OOooOOo
if 94 - 94: oO0o - iII111i * O0
if 17 - 17: I1ii11iIi11i % II111iiii
if 13 - 13: I1Ii111 % OoOoOO00 - i11iIiiIii . I1IiiI + II111iiii
for II111ii1II1i in list ( lisp . lisp_crypto_keys_by_nonce . values ( ) ) :
for OoOo00o in II111ii1II1i : del ( OoOo00o )
if 70 - 70: iII111i * I1ii11iIi11i
lisp . lisp_crypto_keys_by_nonce = { }
if 46 - 46: ooOoO0o / OoO0O00
if 52 - 52: o0oOOo0O0Ooo - OoooooooOO + Ii1I + Ii1I - o0oOOo0O0Ooo / I1Ii111
if 44 - 44: ooOoO0o . i1IIi - I1ii11iIi11i . O0 - ooOoO0o
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if ( lisp . lisp_l2_overlay ) :
OooO0OoOOOO = lisp . LISP_AFI_MAC
i1Ii = lisp . lisp_default_iid
o00OO00OoO = lisp . lisp_address ( OooO0OoOOOO , "0000-0000-0000" , 0 , i1Ii )
o00OO00OoO . mask_len = 0
OOOO0OOoO0O0 = lisp . lisp_address ( OooO0OoOOOO , "ffff-ffff-ffff" , 48 , i1Ii )
lisp . lisp_send_map_request ( lisp_sockets , lisp_ephem_port , o00OO00OoO , OOOO0OOoO0O0 , None )
if 65 - 65: IiII * I1IiiI + Ii1I % i11iIiiIii * oO0o . I1Ii111
if 100 - 100: O0 + IiII - OOooOOo + i11iIiiIii * Ii1I
if 30 - 30: o0oOOo0O0Ooo . Ii1I - OoooooooOO
if 8 - 8: i1IIi - iIii1I11I1II1 * II111iiii + i11iIiiIii / I1Ii111 % OOooOOo
if 16 - 16: I1ii11iIi11i + OoO0O00 - II111iiii
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 85 - 85: OoOoOO00 + i1IIi
if 58 - 58: II111iiii * OOooOOo * I1ii11iIi11i / OOooOOo
if 75 - 75: oO0o
if 50 - 50: Ii1I / Oo0Ooo - oO0o - I11i % iII111i - oO0o
i1111 = threading . Timer ( 60 , OooOooooOOoo0 ,
[ lisp_sockets , lisp_ephem_port ] )
i1111 . start ( )
return
if 91 - 91: OoO0O00 / I11i - II111iiii . I11i
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
def o000O0o ( lisp_socket ) :
lisp . lisp_set_exception ( )
if 42 - 42: OoOoOO00
II = lisp . lisp_get_timestamp ( )
for Ii1I1IIii1II in lisp . lisp_db_list :
if ( Ii1I1IIii1II . dynamic_eid_configured ( ) == False ) : continue
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
iii1i1iiiiIi = [ ]
for Iiii in list ( Ii1I1IIii1II . dynamic_eids . values ( ) ) :
OO0OoO0o00 = Iiii . last_packet
if ( OO0OoO0o00 == None ) : continue
if ( OO0OoO0o00 + Iiii . timeout > II ) : continue
if 53 - 53: O0 * OoO0O00 + OOooOOo
if 50 - 50: O0 . O0 - oO0o / I1IiiI - o0oOOo0O0Ooo * OoOoOO00
if 61 - 61: I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if ( lisp . lisp_program_hardware ) :
o0o = Iiii . dynamic_eid . print_prefix_no_iid ( )
if ( lisp . lisp_arista_is_alive ( o0o ) ) :
lisp . lprint ( ( "Hardware indicates dynamic-EID {} " + "still active" ) . format ( lisp . green ( o0o , False ) ) )
if 84 - 84: O0
continue
if 74 - 74: I1ii11iIi11i - I1IiiI - Oo0Ooo . Ii1I - IiII
if 73 - 73: Oo0Ooo - i1IIi - i1IIi - iII111i . Ii1I + I1ii11iIi11i
if 81 - 81: iII111i * oO0o - I1Ii111 . II111iiii % I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
i1iIIIi1i = Iiii . dynamic_eid . print_address ( )
iI1iIIiiii = "learn%{}%None" . format ( i1iIIIi1i )
iI1iIIiiii = lisp . lisp_command_ipc ( iI1iIIiiii , "lisp-itr" )
lisp . lisp_ipc ( iI1iIIiiii , lisp_socket , "lisp-etr" )
if 26 - 26: I11i . OoooooooOO
lisp . lprint ( "Dynamic-EID {}" . format ( lisp . bold ( lisp . green ( i1iIIIi1i , False ) + " activity timeout" ,
# o0oOOo0O0Ooo % iII111i * O0
False ) ) )
iii1i1iiiiIi . append ( i1iIIIi1i )
if 87 - 87: i11iIiiIii
if 93 - 93: I1ii11iIi11i - OoO0O00 % i11iIiiIii . iII111i / iII111i - I1Ii111
if 9 - 9: I1ii11iIi11i / Oo0Ooo - I1IiiI / OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
for i1iIIIi1i in iii1i1iiiiIi : Ii1I1IIii1II . dynamic_eids . pop ( i1iIIIi1i )
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
o000O0o , [ lisp_socket ] ) . start ( )
return
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
def I1Ii ( ) :
if ( lisp . lisp_is_macos ( ) ) : return ( [ "en0" , "en1" , "lo0" ] )
if 94 - 94: Ii1I - II111iiii . OOooOOo % I11i . i11iIiiIii + O0
if 26 - 26: I11i - iIii1I11I1II1 - I1IiiI / OoO0O00 . OoOoOO00 % iIii1I11I1II1
if 91 - 91: o0oOOo0O0Ooo . iIii1I11I1II1 / oO0o + i1IIi
if 42 - 42: ooOoO0o . o0oOOo0O0Ooo . ooOoO0o - I1ii11iIi11i
i1ii1I1I1 = "Link encap"
oO = getoutput ( "ifconfig | egrep '{}'" . format ( i1ii1I1I1 ) )
if ( oO == "" ) :
i1ii1I1I1 = ": flags="
oO = getoutput ( "ifconfig | egrep '{}'" . format ( i1ii1I1I1 ) )
if 82 - 82: OoOoOO00 % OOooOOo
if 64 - 64: Ii1I . II111iiii + OoooooooOO . I1Ii111 . o0oOOo0O0Ooo
oO = oO . split ( "\n" )
if 99 - 99: Ii1I / Oo0Ooo / IiII % I1IiiI
i11I1II1I11i = [ ]
for OooOoOO0 in oO :
iI1i11iII111 = OooOoOO0 . split ( i1ii1I1I1 ) [ 0 ] . replace ( " " , "" )
i11I1II1I11i . append ( iI1i11iII111 )
if 15 - 15: i11iIiiIii % Ii1I . Oo0Ooo + I1ii11iIi11i
return ( i11I1II1I11i )
if 61 - 61: Oo0Ooo * I1ii11iIi11i % Oo0Ooo - i1IIi - iIii1I11I1II1
if 74 - 74: I1ii11iIi11i + II111iiii / OoO0O00
if 100 - 100: OoOoOO00 * iIii1I11I1II1
if 86 - 86: OoO0O00 * OOooOOo . iII111i
if 32 - 32: o0oOOo0O0Ooo . IiII * I11i
if 93 - 93: o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
def O00o0OO0 ( ) :
global Oo0o
global OOO0o0o
global Ii1iI
global Oo
global I1Ii11I1Ii1i
global i1 , oOOoo00O0O
if 35 - 35: oO0o % ooOoO0o / I1Ii111 + iIii1I11I1II1 . OoooooooOO . I1IiiI
lisp . lisp_i_am ( "itr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "ITR starting up" )
if 71 - 71: IiII * II111iiii * oO0o
if 56 - 56: I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
lisp . lisp_get_local_interfaces ( )
lisp . lisp_get_local_macs ( )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
Oo0o [ 0 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV4 )
Oo0o [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
OOO0o0o = lisp . lisp_open_listen_socket ( "" , "lisp-itr" )
Ii1iI = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
Oo0o [ 2 ] = OOO0o0o
oooOo0OOOoo0 = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
Oo = lisp . lisp_open_listen_socket ( oooOo0OOOoo0 ,
str ( Ooo ) )
if 51 - 51: Oo0Ooo / OoOoOO00 . OOooOOo * o0oOOo0O0Ooo + OoO0O00 * IiII
if 73 - 73: OoO0O00 + OoooooooOO - O0 - Ii1I - II111iiii
if 99 - 99: ooOoO0o . Ii1I + I1Ii111 + OoooooooOO % o0oOOo0O0Ooo
if 51 - 51: iIii1I11I1II1
I1Ii11I1Ii1i = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( o0oOoO00o ) )
if 34 - 34: oO0o + I1IiiI - oO0o
if 17 - 17: II111iiii % iII111i + I11i - iII111i / OOooOOo + ooOoO0o
if 59 - 59: OOooOOo % OoOoOO00 . Ii1I * I1ii11iIi11i % I11i
if 59 - 59: oO0o - iII111i
i1 = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
i1 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
if 15 - 15: I1Ii111 . i11iIiiIii . OoooooooOO / OoO0O00 % Ii1I
if ( lisp . lisp_is_raspbian ( ) == False ) :
oOOoo00O0O = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 93 - 93: O0 % i1IIi . OOooOOo / I1IiiI - I1Ii111 / I1IiiI
if 36 - 36: oO0o % oO0o % i1IIi / i1IIi - ooOoO0o
if 30 - 30: I11i / I1IiiI
if 35 - 35: II111iiii % OOooOOo . ooOoO0o + ooOoO0o % II111iiii % II111iiii
if 72 - 72: II111iiii + i1IIi + o0oOOo0O0Ooo
if 94 - 94: oO0o . i1IIi - o0oOOo0O0Ooo % O0 - OoO0O00
if 72 - 72: Ii1I
if 1 - 1: OoO0O00 * IiII * OoooooooOO + ooOoO0o
lisp . lisp_ipc_socket = OOO0o0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
threading . Thread ( target = o00Oo0oooooo ) . start ( )
if 76 - 76: I11i / OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
lisp . lisp_load_checkpoint ( )
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
i1111 = threading . Timer ( 60 , OooOooooOOoo0 ,
[ Oo0o , Ooo ] )
i1111 . start ( )
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
o000O0o , [ OOO0o0o ] ) . start ( )
return ( True )
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
def I1 ( ) :
oOoOo0O0OOOoO = open ( "./lisp.config" , "r" )
if 50 - 50: ooOoO0o
IIIIiii1IIii = False
II1i11I = 0
for ii1I1IIii11 in oOoOo0O0OOOoO :
if ( ii1I1IIii11 == "lisp database-mapping {\n" ) : IIIIiii1IIii = True
if ( ii1I1IIii11 == "}\n" ) : IIIIiii1IIii = False
if ( IIIIiii1IIii == False ) : continue
if ( ii1I1IIii11 [ 0 ] == " " and ii1I1IIii11 . find ( "prefix {" ) != - 1 ) : II1i11I += 1
if 67 - 67: iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
oOoOo0O0OOOoO . close ( )
return ( II1i11I )
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
def I1iiiiIii ( ) :
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
if 9 - 9: ooOoO0o * OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoO0O00
if 49 - 49: II111iiii
if 25 - 25: OoooooooOO - I1IiiI . I1IiiI * oO0o
II1i11I = I1 ( )
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
if 41 - 41: i1IIi - I11i - Ii1I
III11I1 = os . getenv ( "LISP_ITR_WAIT_TIME" )
III11I1 = 1 if ( III11I1 == None ) else int ( III11I1 )
if 36 - 36: oO0o - Ii1I . Oo0Ooo - i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
if 95 - 95: I1IiiI
if 46 - 46: OoOoOO00 + OoO0O00
while ( II1i11I != len ( lisp . lisp_db_list ) ) :
lisp . lprint ( ( "Waiting {} second(s) for {} database-mapping EID-" + "prefixes, {} processed so far ..." ) . format ( III11I1 , II1i11I ,
# I1IiiI / iIii1I11I1II1 % oO0o * OoooooooOO % ooOoO0o
len ( lisp . lisp_db_list ) ) )
time . sleep ( III11I1 )
if 25 - 25: I1ii11iIi11i . ooOoO0o
if 24 - 24: oO0o / i11iIiiIii + oO0o
if 20 - 20: I11i + Ii1I / O0 % iIii1I11I1II1
if 88 - 88: OoOoOO00 / II111iiii
if 87 - 87: I1ii11iIi11i - I1ii11iIi11i - iII111i + oO0o
if 82 - 82: oO0o / iIii1I11I1II1 . I1IiiI . OOooOOo / o0oOOo0O0Ooo
iiI1I1 = [ ]
ooO = [ ]
for Ii1I1IIii1II in lisp . lisp_db_list :
if ( Ii1I1IIii1II . eid . is_ipv4 ( ) or Ii1I1IIii1II . eid . is_ipv6 ( ) or Ii1I1IIii1II . eid . is_mac ( ) ) :
i1iIIIi1i = Ii1I1IIii1II . eid . print_prefix_no_iid ( )
if ( Ii1I1IIii1II . dynamic_eid_configured ( ) ) : ooO . append ( i1iIIIi1i )
iiI1I1 . append ( i1iIIIi1i )
if 6 - 6: iIii1I11I1II1 . ooOoO0o % o0oOOo0O0Ooo
if 50 - 50: iII111i + O0 + Ii1I . II111iiii / o0oOOo0O0Ooo
return ( iiI1I1 , ooO )
if 17 - 17: Ii1I % iIii1I11I1II1 - iIii1I11I1II1
if 78 - 78: iII111i + I11i . ooOoO0o - iII111i . Ii1I
if 30 - 30: I1IiiI + OoO0O00 % Ii1I * iII111i / Oo0Ooo - I11i
if 64 - 64: iIii1I11I1II1
if 21 - 21: Oo0Ooo . II111iiii
if 54 - 54: II111iiii % II111iiii
if 86 - 86: O0 % Ii1I * ooOoO0o * iIii1I11I1II1 * i1IIi * I11i
if 83 - 83: OoOoOO00 % II111iiii - OoOoOO00 + IiII - O0
def o00Oo0oooooo ( ) :
global IIIiI11ii
if 52 - 52: Oo0Ooo * ooOoO0o
lisp . lisp_set_exception ( )
if 33 - 33: Ii1I
if 74 - 74: OOooOOo + O0 + i1IIi - i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
if 46 - 46: IiII
iiI1I1 , ooO = I1iiiiIii ( )
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
if 1 - 1: i1IIi . i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
Oo00O = None
if ( lisp . lisp_ipc_data_plane ) :
lisp . lprint ( lisp . bold ( "Data-plane packet capture disabled" , False ) )
Oo00O = "(udp src port 4342 and ip[28] == 0x28)" + " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 12 - 12: o0oOOo0O0Ooo - ooOoO0o * I1Ii111
if 14 - 14: Oo0Ooo - Ii1I % Ii1I * O0 . i11iIiiIii / O0
lisp . lprint ( "Control-plane capture: '{}'" . format ( Oo00O ) )
else :
lisp . lprint ( "Capturing packets for source-EIDs {}" . format ( lisp . green ( str ( iiI1I1 ) , False ) ) )
if 79 - 79: o0oOOo0O0Ooo - I11i + o0oOOo0O0Ooo . oO0o
if 28 - 28: i1IIi - iII111i
if ( lisp . lisp_pitr ) : lisp . lprint ( "Configured for PITR functionality" )
if 54 - 54: iII111i - O0 % OOooOOo
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
if 28 - 28: I11i
if 58 - 58: OoOoOO00
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
oo0oOOo0 = lisp . lisp_l2_overlay
if ( oo0oOOo0 == False ) :
if ( lisp . lisp_is_linux ( ) ) : O0OoO0ooOO0o ( iiI1I1 , ooO )
if 81 - 81: O0 * II111iiii + I1IiiI * i11iIiiIii - I1ii11iIi11i / I1IiiI
if 63 - 63: OoOoOO00 - OoooooooOO % I1Ii111
if 77 - 77: OoO0O00 . i1IIi
if 35 - 35: ooOoO0o * OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / O0
if 100 - 100: I1Ii111 . o0oOOo0O0Ooo * Oo0Ooo % O0 * O0
if 14 - 14: I1ii11iIi11i . ooOoO0o + II111iiii / iII111i / I11i
if ( Oo00O == None ) :
if ( lisp . lisp_pitr ) :
ooo0O = iII1iii ( iiI1I1 , [ ] , False , True )
else :
ooo0O = iII1iii ( iiI1I1 , ooO , oo0oOOo0 ,
False )
if 12 - 12: OOooOOo
else :
ooo0O = Oo00O
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
oO = I1Ii ( )
oOIIi1iiii1iI = os . getenv ( "LISP_PCAP_LIST" )
if ( oOIIi1iiii1iI == None ) :
iIiiii = ""
O0000OOO0 = [ ]
else :
ooo0 = list ( set ( oOIIi1iiii1iI . split ( ) ) & set ( oO ) )
O0000OOO0 = list ( set ( oOIIi1iiii1iI . split ( ) ) ^ set ( oO ) )
iIiiii = "user-selected "
lisp . lprint ( "User pcap-list: {}, active-interfaces: {}" . format ( oOIIi1iiii1iI , oO ) )
if 78 - 78: ooOoO0o
oO = ooo0
if 53 - 53: ooOoO0o * OOooOOo . iII111i / O0 * ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
O000 = ( ooo0O . find ( "ether host" ) != - 1 )
for ooo0o000O in oO :
if ( ooo0o000O in [ "lo" , "lispers.net" ] and O000 ) :
lisp . lprint ( ( "Capturing suppressed on interface {}, " + "MAC filters configured" ) . format ( ooo0o000O ) )
if 100 - 100: oO0o . ooOoO0o * I1ii11iIi11i / iIii1I11I1II1 * i1IIi % ooOoO0o
continue
if 17 - 17: I11i . IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if ( lisp . lisp_is_macos ( ) ) :
if ( ooo0o000O not in [ "en0" , "lo0" ] ) : continue
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
OOO = [ ooo0o000O , ooo0O , IIIiI11ii ]
lisp . lprint ( "Capturing packets on {}interface {}" . format ( iIiiii , ooo0o000O ) )
threading . Thread ( target = Iiiiii1iI , args = OOO ) . start ( )
if 49 - 49: o0oOOo0O0Ooo . IiII / OoO0O00 + II111iiii
if ( Oo00O ) : return
if 47 - 47: O0 / Ii1I
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
if 98 - 98: OoO0O00 . OoO0O00 * oO0o * II111iiii * I1Ii111
oOooO0 = "(udp src port 4342 and ip[28] == 0x28)"
for ooo0o000O in O0000OOO0 :
OOO = [ ooo0o000O , oOooO0 , IIIiI11ii ]
lisp . lprint ( "Capture RLOC-probe replies on RLOC interface {}" . format ( ooo0o000O ) )
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
threading . Thread ( target = Iiiiii1iI , args = OOO ) . start ( )
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
return
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
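# Shutdown handler: cancels the periodic Info-Request timer and closes the
# ITR's control, data and IPC sockets.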
def IiI11i1IIiiI ( ) :
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if ( i11 ) : i11 . cancel ( )
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
lisp . lisp_close_socket ( Oo0o [ 0 ] , "" )
lisp . lisp_close_socket ( Oo0o [ 1 ] , "" )
lisp . lisp_close_socket ( Oo , "" )
lisp . lisp_close_socket ( I1Ii11I1Ii1i , "" )
lisp . lisp_close_socket ( OOO0o0o , "lisp-itr" )
lisp . lisp_close_socket ( Ii1iI , "lispers.net-itr" )
return
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
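# Core data-plane input routine: hands RLOC-probe replies to the control-plane
# parser, decodes the inner packet, performs dynamic-EID discovery, looks up
# the map-cache (sending a Map-Request on a miss or stale entry), and finally
# encapsulates to the selected RLOC or replicates across an RLE.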
def III ( packet , device , input_interface , macs , my_sa ) :
global Oo0o
global Ooo
global i1 , oOOoo00O0O
global OOO0o0o
if 41 - 41: i11iIiiIii + Oo0Ooo / I1IiiI . OoooooooOO % oO0o % i1IIi
if 70 - 70: Oo0Ooo . OoooooooOO - iII111i
if 30 - 30: I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
oOo0oO = packet
packet , IIi1IIIIi , OOOoO , I1i = lisp . lisp_is_rloc_probe ( packet , 1 )
if ( oOo0oO != packet ) :
if ( IIi1IIIIi == None ) : return
lisp . lisp_parse_packet ( Oo0o , packet , IIi1IIIIi , OOOoO , I1i )
return
if 12 - 12: OoooooooOO
if 20 - 20: i1IIi - I11i
packet = lisp . lisp_packet ( packet )
if ( packet . decode ( False , None , None ) == None ) : return
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
if 32 - 32: Ii1I . IiII . OoooooooOO - OoO0O00 + oO0o
if 88 - 88: iII111i
if ( my_sa ) : input_interface = device
if 19 - 19: II111iiii * IiII + Ii1I
if 65 - 65: OOooOOo . I1Ii111 . OoO0O00 . iII111i - OOooOOo
if 19 - 19: i11iIiiIii + iII111i % ooOoO0o
if 14 - 14: OoO0O00 . II111iiii . I11i / Ii1I % I1ii11iIi11i - ooOoO0o
o0oOoO0O = packet . inner_source
i1Ii = lisp . lisp_get_interface_instance_id ( input_interface , o0oOoO0O )
packet . inner_dest . instance_id = i1Ii
packet . inner_source . instance_id = i1Ii
if 84 - 84: O0 * OoooooooOO - IiII * IiII
if 8 - 8: ooOoO0o / i1IIi . oO0o
if 41 - 41: iII111i + OoO0O00
if 86 - 86: OoOoOO00 . iIii1I11I1II1 - OoO0O00
if ( macs != "" ) : macs = ", MACs: " + macs + ","
packet . print_packet ( "Receive {}{}" . format ( device , macs ) , False )
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if ( device != input_interface and device != "lispers.net" ) :
lisp . dprint ( "Not our MAC address on interface {}, pcap interface {}" . format ( input_interface , device ) )
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
return
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
oO0OO0 = lisp . lisp_decent_push_configured
if ( oO0OO0 ) :
O00Oo = packet . inner_dest . is_multicast_address ( )
Ii1111IiIi = packet . inner_source . is_local ( )
oO0OO0 = ( Ii1111IiIi and O00Oo )
if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
if ( oO0OO0 == False ) :
if 68 - 68: OoooooooOO % II111iiii
if 26 - 26: II111iiii % i11iIiiIii % iIii1I11I1II1 % I11i * I11i * I1ii11iIi11i
if 24 - 24: II111iiii % I1Ii111 - ooOoO0o + I1IiiI * I1ii11iIi11i
if 2 - 2: Ii1I - IiII
Ii1I1IIii1II = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False )
if ( Ii1I1IIii1II == None ) :
lisp . dprint ( "Packet received from non-EID source" )
return
if 83 - 83: oO0o % o0oOOo0O0Ooo % Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
if 71 - 71: OoooooooOO
if 33 - 33: I1Ii111
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
if ( Ii1I1IIii1II . dynamic_eid_configured ( ) ) :
IIiiii = lisp . lisp_allow_dynamic_eid ( input_interface ,
packet . inner_source )
if ( IIiiii ) :
lisp . lisp_itr_discover_eid ( Ii1I1IIii1II , packet . inner_source ,
input_interface , IIiiii , OOO0o0o )
else :
iI111i1I1II = lisp . green ( packet . inner_source . print_address ( ) , False )
lisp . dprint ( "Disallow dynamic-EID {} on interface {}" . format ( iI111i1I1II ,
input_interface ) )
return
if 96 - 96: I1Ii111 / Oo0Ooo * II111iiii - iII111i * Oo0Ooo
if 81 - 81: IiII . o0oOOo0O0Ooo / I1Ii111
if 17 - 17: i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + I11i - ooOoO0o
if ( packet . inner_source . is_local ( ) and
packet . udp_dport == lisp . LISP_CTRL_PORT ) : return
if 78 - 78: I11i * OoOoOO00 . O0 / O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
if 68 - 68: oO0o - I1ii11iIi11i % O0 % I1Ii111
if 11 - 11: O0 / OoO0O00 % OOooOOo + o0oOOo0O0Ooo + iIii1I11I1II1
if 40 - 40: ooOoO0o - OOooOOo . Ii1I * Oo0Ooo % I1Ii111
OoO = False
if ( packet . inner_version == 4 ) :
OoO , packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
else :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
packet . encap_port = lisp . LISP_L2_DATA_PORT
if 54 - 54: I11i / I1IiiI * oO0o + OoooooooOO - iII111i / OoooooooOO
if 19 - 19: IiII * ooOoO0o * o0oOOo0O0Ooo + O0 / O0
if 73 - 73: iIii1I11I1II1 / iIii1I11I1II1 - oO0o
if 91 - 91: oO0o + I1IiiI
if 59 - 59: I1IiiI + i11iIiiIii + i1IIi / I11i
if 44 - 44: I11i . OoOoOO00 * I1IiiI + OoooooooOO - iII111i - IiII
if ( iiIIIIi1i1 == False ) :
Ii1I1IIii1II = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( Ii1I1IIii1II and Ii1I1IIii1II . dynamic_eid_configured == False ) :
lisp . dprint ( ( "Packet destined to local EID-prefix {}, " + "natively forwarding" ) . format ( Ii1I1IIii1II . print_eid_tuple ( ) ) )
if 15 - 15: IiII / O0 . o0oOOo0O0Ooo . i11iIiiIii
packet . send_packet ( i1 , packet . inner_dest )
return
if 59 - 59: I1Ii111 - o0oOOo0O0Ooo - ooOoO0o
if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
I11iIiII = lisp . lisp_map_cache_lookup ( packet . inner_source , packet . inner_dest )
if ( I11iIiII ) : I11iIiII . add_recent_source ( packet . inner_source )
if 66 - 66: Oo0Ooo - o0oOOo0O0Ooo * IiII + OoOoOO00 + o0oOOo0O0Ooo - iIii1I11I1II1
if 17 - 17: oO0o
if 22 - 22: I11i + iIii1I11I1II1
if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
if 17 - 17: I1ii11iIi11i . II111iiii . ooOoO0o / I1ii11iIi11i
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
oO00oOo0OOO = Ii1I1IIii1II . secondary_iid if ( Ii1I1IIii1II != None ) else None
if ( oO00oOo0OOO and I11iIiII and I11iIiII . action == lisp . LISP_NATIVE_FORWARD_ACTION ) :
ii1 = packet . inner_dest
ii1 . instance_id = oO00oOo0OOO
I11iIiII = lisp . lisp_map_cache_lookup ( packet . inner_source , ii1 )
if ( I11iIiII ) : I11iIiII . add_recent_source ( packet . inner_source )
if 51 - 51: O0 . oO0o + i11iIiiIii
if 79 - 79: OoOoOO00 . oO0o . IiII % Ii1I
if 65 - 65: i11iIiiIii + i1IIi - Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if ( I11iIiII == None or lisp . lisp_mr_or_pubsub ( I11iIiII . action ) ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_dest ) ) : return
if 12 - 12: OOooOOo
ooOo0O = ( I11iIiII and I11iIiII . action == lisp . LISP_SEND_PUBSUB_ACTION )
lisp . lisp_send_map_request ( Oo0o , Ooo ,
packet . inner_source , packet . inner_dest , None , ooOo0O )
if 37 - 37: Ii1I % OoO0O00
if ( packet . is_trace ( ) ) :
lisp . lisp_trace_append ( packet , reason = "map-cache miss" )
if 79 - 79: I1ii11iIi11i + I1IiiI / I1IiiI
return
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if 52 - 52: i11iIiiIii / i1IIi
if ( I11iIiII and I11iIiII . is_active ( ) and I11iIiII . has_ttl_elapsed ( ) ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_dest ) == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( I11iIiII . print_eid_tuple ( ) , False ) ) )
if 1 - 1: ooOoO0o
lisp . lisp_send_map_request ( Oo0o , Ooo ,
packet . inner_source , packet . inner_dest , None )
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
I11iIiII . last_refresh_time = time . time ( )
I11iIiII . stats . increment ( len ( packet . packet ) )
if 83 - 83: I1Ii111
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
OO0OoOOO0 , O00ooOo , oOO0o00O , oOoO , IIIIiI1iiiIiii , ii1i1i = I11iIiII . select_rloc ( packet , OOO0o0o )
if 50 - 50: o0oOOo0O0Ooo * Ii1I % I1ii11iIi11i / Oo0Ooo - O0 % iII111i
if 48 - 48: I1IiiI + I1ii11iIi11i + II111iiii * i11iIiiIii
if ( OO0OoOOO0 == None and IIIIiI1iiiIiii == None ) :
if ( oOoO == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
packet . send_packet ( i1 , packet . inner_dest )
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if ( packet . is_trace ( ) ) :
lisp . lisp_trace_append ( packet , reason = "not an EID" )
if 39 - 39: iIii1I11I1II1 - OoooooooOO
return
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
iiIiI = "No reachable RLOCs found"
lisp . dprint ( iiIiI )
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = iiIiI )
return
if 87 - 87: ooOoO0o - OoooooooOO + i11iIiiIii
if ( OO0OoOOO0 and OO0OoOOO0 . is_null ( ) ) :
iiIiI = "Drop action RLOC found"
lisp . dprint ( iiIiI )
if 73 - 73: I11i * OoooooooOO . O0 . IiII
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = iiIiI )
return
if 55 - 55: Oo0Ooo
if 77 - 77: II111iiii
if 16 - 16: I1IiiI * II111iiii / iIii1I11I1II1 - iII111i
if 3 - 3: I1IiiI * ooOoO0o + II111iiii - OoO0O00
if 97 - 97: I1ii11iIi11i / oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
packet . outer_tos = packet . inner_tos
packet . outer_ttl = 32 if ( OoO ) else packet . inner_ttl
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if ( OO0OoOOO0 ) :
packet . outer_dest . copy_address ( OO0OoOOO0 )
iI = packet . outer_dest . afi_to_version ( )
packet . outer_version = iI
i11ii = lisp . lisp_myrlocs [ 0 ] if ( iI == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 50 - 50: Ii1I / OoOoOO00 * Ii1I
packet . outer_source . copy_address ( i11ii )
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , rloc_entry = ii1i1i ) == False ) : return
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
if ( packet . encode ( oOO0o00O ) == None ) : return
if ( len ( packet . packet ) <= 1500 ) : packet . print_packet ( "Send" , True )
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
o0OooooOoOO = oOOoo00O0O if iI == 6 else i1
packet . send_packet ( o0OooooOoOO , packet . outer_dest )
if 19 - 19: IiII
elif ( IIIIiI1iiiIiii ) :
if 78 - 78: OOooOOo % o0oOOo0O0Ooo
if 39 - 39: I1ii11iIi11i + I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo
if 7 - 7: IiII . OoOoOO00 / I1ii11iIi11i . OOooOOo * I11i - II111iiii
if 37 - 37: I1Ii111 . OoOoOO00 / O0 * iII111i
if 7 - 7: OoO0O00 * I11i + II111iiii % i11iIiiIii
i1i1IiIiIi1Ii = IIIIiI1iiiIiii . rle_nodes [ 0 ] . level
oO0ooOO = len ( packet . packet )
for IIi1iI1 in IIIIiI1iiiIiii . rle_forwarding_list :
if ( IIi1iI1 . level != i1i1IiIiIi1Ii ) : return
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
packet . outer_dest . copy_address ( IIi1iI1 . address )
if ( oO0OO0 ) : packet . inner_dest . instance_id = 0xffffff
iI = packet . outer_dest . afi_to_version ( )
packet . outer_version = iI
i11ii = lisp . lisp_myrlocs [ 0 ] if ( iI == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
packet . outer_source . copy_address ( i11ii )
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet ) == False ) : return
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if ( packet . encode ( None ) == None ) : return
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
packet . print_packet ( "Replicate-to-L{}" . format ( IIi1iI1 . level ) , True )
packet . send_packet ( i1 , packet . outer_dest )
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
I11i1iIiiIiIi = len ( packet . packet ) - oO0ooOO
packet . packet = packet . packet [ I11i1iIiiIiIi : : ]
if 49 - 49: OOooOOo . I1ii11iIi11i . i11iIiiIii - II111iiii / Ii1I
if 62 - 62: OOooOOo
if 1 - 1: IiII / IiII - i11iIiiIii
if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
del ( packet )
return
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
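# pcap callback: computes the link-layer header length for the device
# (BSD loopback, the lispers.net tap, Ethernet, optionally 802.1Q), drops ARP
# frames, and passes the IP payload to the data-plane input routine above.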
def Iiii1ii ( device , not_used , packet ) :
I1i111IiIiIi1 = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
if 39 - 39: I11i - I1ii11iIi11i
if ( lisp . lisp_frame_logging ) :
OOO0o0OO0OO = lisp . bold ( "Received frame on interface '{}'" . format ( device ) ,
False )
oOo0O = lisp . lisp_format_packet ( packet [ 0 : 64 ] )
lisp . lprint ( "{}: {}" . format ( OOO0o0OO0OO , oOo0O ) )
if 43 - 43: o0oOOo0O0Ooo . iII111i . I11i + iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 % OoOoOO00 + I1ii11iIi11i / i1IIi % II111iiii + OOooOOo
if 91 - 91: iIii1I11I1II1 % OoO0O00 . o0oOOo0O0Ooo + Ii1I + o0oOOo0O0Ooo
if 95 - 95: Ii1I + I1ii11iIi11i * OOooOOo
if 16 - 16: I11i / I1IiiI + OoO0O00 % iIii1I11I1II1 - i1IIi . oO0o
iIi1iIIIiIiI = ""
OooOo000o0o = False
OooOoOO0 = device
if ( I1i111IiIiIi1 == 14 ) :
oO , iI1I1iII1i , iiIIii , OooOo000o0o = lisp . lisp_get_input_interface ( packet )
OooOoOO0 = device if ( device in oO ) else oO [ 0 ]
iIi1iIIIiIiI = lisp . lisp_format_macs ( iI1I1iII1i , iiIIii )
if ( OooOoOO0 . find ( "vlan" ) != - 1 ) : I1i111IiIiIi1 += 4
if 70 - 70: o0oOOo0O0Ooo - OOooOOo
if 62 - 62: I11i
if 63 - 63: OOooOOo + ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if ( int ( iiIIii [ 1 ] , 16 ) & 1 ) : OooOo000o0o = True
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if ( I1i111IiIiIi1 != 0 ) :
i1iii11 = struct . unpack ( "H" , packet [ I1i111IiIiIi1 - 2 : I1i111IiIiIi1 ] ) [ 0 ]
i1iii11 = socket . ntohs ( i1iii11 )
if ( i1iii11 == 0x8100 ) :
oOo0O0o0000o0O0 = struct . unpack ( "I" , packet [ I1i111IiIiIi1 : I1i111IiIiIi1 + 4 ] ) [ 0 ]
oOo0O0o0000o0O0 = socket . ntohl ( oOo0O0o0000o0O0 )
OooOoOO0 = "vlan" + str ( oOo0O0o0000o0O0 >> 16 )
I1i111IiIiIi1 += 4
elif ( i1iii11 == 0x806 ) :
lisp . dprint ( "Dropping ARP packets, host should have default route" )
return
if 53 - 53: I1Ii111
if 69 - 69: OoOoOO00 . o0oOOo0O0Ooo . I1IiiI - I1ii11iIi11i
if 32 - 32: OoooooooOO / I1IiiI / iIii1I11I1II1 + II111iiii . oO0o . o0oOOo0O0Ooo
if ( lisp . lisp_l2_overlay ) : I1i111IiIiIi1 = 0
if 21 - 21: iIii1I11I1II1 / II111iiii % i1IIi
III ( packet [ I1i111IiIiIi1 : : ] , device , OooOoOO0 , iIi1iIIIiIiI , OooOo000o0o )
return
if 8 - 8: OoO0O00 + OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
if 49 - 49: I11i . ooOoO0o * OoOoOO00 % IiII . O0
if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
if 52 - 52: OoO0O00 % Ii1I * II111iiii
if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if 70 - 70: I1Ii111 + oO0o
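# Installs iptables/ip6tables rules in a "lisp" chain (unless LISP_NO_IPTABLES
# is set): accept loopback, multicast, link-local and locally-owned addresses,
# accept EID-to-EID traffic when not acting as a PITR, and drop EID-sourced
# packets so they are only handled via packet capture.  Also applies the
# virtio checksum workaround when LISP_VIRTIO_BUG is set.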
def O0OoO0ooOO0o ( sources , dyn_eids ) :
if ( os . getenv ( "LISP_NO_IPTABLES" ) != None ) :
lisp . lprint ( "User selected to suppress installing iptables rules" )
return
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
os . system ( "sudo iptables -t raw -N lisp" )
os . system ( "sudo iptables -t raw -A PREROUTING -j lisp" )
os . system ( "sudo ip6tables -t raw -N lisp" )
os . system ( "sudo ip6tables -t raw -A PREROUTING -j lisp" )
if 78 - 78: O0 / II111iiii * OoO0O00
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
if 58 - 58: IiII + iIii1I11I1II1
if 65 - 65: II111iiii - I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 * iII111i + Ii1I
if 79 - 79: ooOoO0o . OoOoOO00 % I1Ii111 - Oo0Ooo
if 69 - 69: ooOoO0o - o0oOOo0O0Ooo . ooOoO0o
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
oOII1ii1ii11I1 = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
o0ooOO0o = [ "127.0.0.1" , "::1" , "224.0.0.0/4 -p igmp" , "ff00::/8" ,
"fe80::/16" ]
o0ooOO0o += sources + lisp . lisp_get_all_addresses ( )
for ooo0i1iI1i1I1 in o0ooOO0o :
if ( lisp . lisp_is_mac_string ( ooo0i1iI1i1I1 ) ) : continue
O0Oo0 = "" if ooo0i1iI1i1I1 . find ( ":" ) == - 1 else "6"
os . system ( oOII1ii1ii11I1 . format ( O0Oo0 , ooo0i1iI1i1I1 ) )
if 80 - 80: I1IiiI - iIii1I11I1II1 . OOooOOo + OoO0O00 - I1Ii111
if 5 - 5: iII111i
if 62 - 62: OoOoOO00 . OoooooooOO . OOooOOo . OoO0O00 * iII111i
if 78 - 78: oO0o / OoO0O00 - oO0o * OoooooooOO . OoOoOO00
if 96 - 96: I1IiiI % i1IIi . o0oOOo0O0Ooo . O0
if 37 - 37: i1IIi - OOooOOo % OoooooooOO / OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
if ( lisp . lisp_pitr == False ) :
oOII1ii1ii11I1 = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
oOo00Ooo0o0 = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
for IIi1IIIIi in sources :
if ( lisp . lisp_is_mac_string ( IIi1IIIIi ) ) : continue
if ( IIi1IIIIi in dyn_eids ) : continue
O0Oo0 = "" if IIi1IIIIi . find ( ":" ) == - 1 else "6"
for o00OO00OoO in sources :
if ( lisp . lisp_is_mac_string ( o00OO00OoO ) ) : continue
if ( o00OO00OoO in dyn_eids ) : continue
if ( o00OO00OoO . find ( "." ) != - 1 and IIi1IIIIi . find ( "." ) == - 1 ) : continue
if ( o00OO00OoO . find ( ":" ) != - 1 and IIi1IIIIi . find ( ":" ) == - 1 ) : continue
if ( getoutput ( oOo00Ooo0o0 . format ( O0Oo0 , IIi1IIIIi , o00OO00OoO ) ) == "" ) :
continue
if 33 - 33: I11i
os . system ( oOII1ii1ii11I1 . format ( O0Oo0 , IIi1IIIIi , o00OO00OoO ) )
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
if 21 - 21: OOooOOo
if 6 - 6: IiII
if 46 - 46: IiII + oO0o
if 79 - 79: OoooooooOO - IiII * IiII . OoOoOO00
if 100 - 100: II111iiii * I11i % I1IiiI / I1ii11iIi11i
OOo = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
for IIi1IIIIi in sources :
if ( lisp . lisp_is_mac_string ( IIi1IIIIi ) ) : continue
O0Oo0 = "" if IIi1IIIIi . find ( ":" ) == - 1 else "6"
os . system ( OOo . format ( O0Oo0 , IIi1IIIIi ) )
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
if 65 - 65: OoOoOO00
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
Ii1iiIi1I11i = getoutput ( "sudo iptables -t raw -S lisp" ) . split ( "\n" )
Ii1iiIi1I11i += getoutput ( "sudo ip6tables -t raw -S lisp" ) . split ( "\n" )
lisp . lprint ( "Using kernel filters: {}" . format ( Ii1iiIi1I11i ) )
if 89 - 89: I1Ii111 . IiII % Oo0Ooo . Oo0Ooo - OoooooooOO
if 56 - 56: I11i
if 21 - 21: iIii1I11I1II1 / I1Ii111 + ooOoO0o - I11i / Oo0Ooo / II111iiii
if 69 - 69: I1IiiI . OoOoOO00
if 53 - 53: I11i
if 68 - 68: oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if ( os . getenv ( "LISP_VIRTIO_BUG" ) != None ) :
oOo00o = ( "sudo iptables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 98 - 98: OOooOOo % i1IIi . I1IiiI . II111iiii . I1ii11iIi11i / i11iIiiIii
oOo00o += ( "sudo iptables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill; " )
if 32 - 32: o0oOOo0O0Ooo + I1IiiI . I1Ii111
oOo00o += ( "sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 41 - 41: OoOoOO00 . i11iIiiIii / I11i
oOo00o += ( "sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill" )
if 98 - 98: OoOoOO00 % II111iiii
os . system ( oOo00o )
OoO0O000 = lisp . bold ( "virtio" , False )
lisp . lprint ( "{} bug workaround, configure '{}'" . format ( OoO0O000 , oOo00o ) )
if 14 - 14: OoO0O00 / OoO0O00 * O0 . oO0o
return
if 59 - 59: II111iiii * i11iIiiIii
if 54 - 54: O0 % OoooooooOO - I1IiiI
if 61 - 61: Oo0Ooo * IiII . Oo0Ooo + Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
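# Builds the pcap filter string from the source EID-prefixes, dynamic-EIDs and
# local RLOC addresses, plus clauses that admit RLOC-probe Map-Replies and
# certain multicast control messages; for an L2 overlay a match-everything
# Ethernet filter is returned instead.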
def iII1iii ( sources , dyn_eids , l2_overlay , pitr ) :
if ( l2_overlay ) :
ooo0O = "ether[6:4] >= 0 and ether[10:2] >= 0"
lisp . lprint ( "Using pcap filter: '{}'" . format ( ooo0O ) )
return ( ooo0O )
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
iiiiI1IiI1I1 = "(not ether proto 0x806)"
oOooO0 = " or (udp src port 4342 and ip[28] == 0x28)"
iI111i11iI1 = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 2 - 2: OoOoOO00 + I1Ii111 + OoooooooOO . i1IIi
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
i11I1I = ""
oo0ooooo00o = ""
for IIi1IIIIi in sources :
OoOo = IIi1IIIIi
if ( lisp . lisp_is_mac_string ( IIi1IIIIi ) ) :
OoOo = IIi1IIIIi . split ( "/" ) [ 0 ]
OoOo = OoOo . replace ( "-" , "" )
i111i1iIi1 = [ ]
for IIiiii in range ( 0 , 12 , 2 ) : i111i1iIi1 . append ( OoOo [ IIiiii : IIiiii + 2 ] )
OoOo = "ether host " + ":" . join ( i111i1iIi1 )
if 95 - 95: OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i1IIi . OoooooooOO
if 29 - 29: ooOoO0o - i1IIi . I11i - I1ii11iIi11i + ooOoO0o + OoooooooOO
i11I1I += "{}" . format ( OoOo )
if ( IIi1IIIIi not in dyn_eids ) : oo0ooooo00o += "{}" . format ( OoOo )
if ( sources [ - 1 ] == IIi1IIIIi ) : break
i11I1I += " or "
if ( IIi1IIIIi not in dyn_eids ) : oo0ooooo00o += " or "
if 36 - 36: i1IIi / ooOoO0o . iIii1I11I1II1
if ( oo0ooooo00o [ - 4 : : ] == " or " ) : oo0ooooo00o = oo0ooooo00o [ 0 : - 4 ]
if 12 - 12: Ii1I
if 71 - 71: I1IiiI . II111iiii . I1IiiI - ooOoO0o
if 45 - 45: IiII / O0 / OoOoOO00 * OOooOOo
if 18 - 18: iIii1I11I1II1 + OOooOOo + iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
if 65 - 65: oO0o + OoOoOO00 + II111iiii
oOOoo = getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
oOOoo = ( oOOoo != "" and oOOoo [ 0 ] == " " )
i1I1iIii11 = lisp . lisp_get_loopback_address ( ) if ( oOOoo ) else None
if 80 - 80: OoOoOO00 - II111iiii
I1iI1IiI = ""
i1i1Ii1I = lisp . lisp_get_all_addresses ( )
for ooo0i1iI1i1I1 in i1i1Ii1I :
if ( ooo0i1iI1i1I1 == i1I1iIii11 ) : continue
I1iI1IiI += "{}" . format ( ooo0i1iI1i1I1 )
if ( i1i1Ii1I [ - 1 ] == ooo0i1iI1i1I1 ) : break
I1iI1IiI += " or "
if 11 - 11: OOooOOo - OoOoOO00 - o0oOOo0O0Ooo * OoOoOO00 + ooOoO0o
if 62 - 62: I1IiiI * i11iIiiIii . iII111i
if ( i11I1I != "" ) :
i11I1I = " and (src net {})" . format ( i11I1I )
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if ( oo0ooooo00o != "" ) :
oo0ooooo00o = " and not (dst net {})" . format ( oo0ooooo00o )
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if ( I1iI1IiI != "" ) :
I1iI1IiI = " and not (dst host {})" . format ( I1iI1IiI )
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if ( pitr ) :
oo0ooooo00o = ""
I1iI1IiI = I1iI1IiI . replace ( "dst " , "" )
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
if 81 - 81: IiII / OoOoOO00 * IiII . O0
ooo0O = iiiiI1IiI1I1 + i11I1I + oo0ooooo00o + I1iI1IiI
ooo0O += oOooO0
ooo0O += iI111i11iI1
if 61 - 61: OoO0O00 * OOooOOo + I1Ii111 . iIii1I11I1II1 % I11i . I1Ii111
lisp . lprint ( "Using pcap filter: '{}'" . format ( ooo0O ) )
return ( ooo0O )
if 53 - 53: I1Ii111 * IiII / iIii1I11I1II1 / I1IiiI % I1ii11iIi11i
if 39 - 39: OoO0O00 / OoooooooOO . OoO0O00 * I1ii11iIi11i / OoOoOO00
if 38 - 38: OoO0O00 / ooOoO0o % I1Ii111 * I11i + i11iIiiIii % ooOoO0o
if 61 - 61: I1Ii111 - Ii1I % I1ii11iIi11i / ooOoO0o / iII111i + iIii1I11I1II1
if 87 - 87: I1Ii111 + ooOoO0o + O0 / i1IIi % IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
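# Per-interface capture loop: opens the device with pcappy (Python 2) or pcapy
# (Python 3), applies the filter and loops on the frame callback.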
def Iiiiii1iI ( device , pfilter , pcap_lock ) :
lisp . lisp_set_exception ( )
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
if ( lisp . lisp_is_python2 ( ) ) :
pcap_lock . acquire ( )
O0000oO0o00 = pcappy . open_live ( device , 9000 , 0 , 100 )
pcap_lock . release ( )
O0000oO0o00 . filter = pfilter
O0000oO0o00 . loop ( - 1 , Iiii1ii , device )
if 80 - 80: OoooooooOO + IiII
if ( lisp . lisp_is_python3 ( ) ) :
pcap_lock . acquire ( )
O0000oO0o00 = pcapy . open_live ( device , 9000 , 0 , 100 )
pcap_lock . release ( )
O0000oO0o00 . setfilter ( pfilter )
O0000oO0o00 . loop ( - 1 , Iiii1ii )
if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
return
if 43 - 43: Oo0Ooo . I1Ii111
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
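# Periodic timer: builds and sends Info-Request messages (used for NAT
# traversal) and reschedules itself every LISP_INFO_INTERVAL seconds.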
def i111i1I1ii1i ( ) :
global i11
global I1Ii11I1Ii1i
global Oo0o
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
lisp . lisp_set_exception ( )
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
if 49 - 49: IiII * O0 . IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
o00oo0OO0 = [ I1Ii11I1Ii1i , I1Ii11I1Ii1i ,
OOO0o0o ]
lisp . lisp_build_info_requests ( o00oo0OO0 , None , lisp . LISP_CTRL_PORT )
if 60 - 60: ooOoO0o
if 66 - 66: I11i / ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
i11 . cancel ( )
i11 = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
i111i1I1ii1i , [ ] )
i11 . start ( )
return
if 29 - 29: iIii1I11I1II1 - OoO0O00 + I1IiiI % iIii1I11I1II1 % OOooOOo
if 84 - 84: IiII + I1ii11iIi11i + Ii1I + iII111i
if 62 - 62: i11iIiiIii + OoOoOO00 + i1IIi
if 69 - 69: OoOoOO00
if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
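# "lisp map-resolver" command handler: records the map-resolvers, schedules a
# short timer to test them, and kicks off the periodic Info-Request timer.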
def i1I1Ii11i ( kv_pair ) :
global Oo0o
global Ooo
global i11
if 19 - 19: IiII - o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00 / OOooOOo
lispconfig . lisp_map_resolver_command ( kv_pair )
if 87 - 87: OoOoOO00 - ooOoO0o - OOooOOo + Oo0Ooo % iIii1I11I1II1 / i11iIiiIii
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ Oo0o , Ooo ] )
lisp . lisp_test_mr_timer . start ( )
if 12 - 12: ooOoO0o
if 86 - 86: oO0o - OoO0O00
if 63 - 63: I1IiiI / OoOoOO00 + OoooooooOO . I11i . ooOoO0o
if 48 - 48: i1IIi - iII111i - i11iIiiIii . I11i - iII111i * I11i
if 60 - 60: OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
i11 = threading . Timer ( 0 , i111i1I1ii1i , [ ] )
i11 . start ( )
return
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
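# "lisp database-mapping" command handler, a thin wrapper around lispconfig.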
def OoOooOO0oOOo0O ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
return
if 42 - 42: iII111i / o0oOOo0O0Ooo + Oo0Ooo . Oo0Ooo % OOooOOo
if 16 - 16: i1IIi + OoO0O00 % OoOoOO00 + Ii1I * Oo0Ooo
if 3 - 3: i11iIiiIii
if 81 - 81: I1IiiI . OoooooooOO * Ii1I . oO0o - O0 * oO0o
if 72 - 72: II111iiii - OOooOOo + I1IiiI - I11i
if 91 - 91: II111iiii
if 53 - 53: OoO0O00 % o0oOOo0O0Ooo / OOooOOo % IiII % OoO0O00 % OoooooooOO
if 31 - 31: I1IiiI
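# "lisp xtr-parameters" command handler: applies the settings, (re)starts the
# RLOC-probe timer when probing or NAT-traversal has just been enabled,
# records the ephemeral lisp-crypto port, and pushes the parameters to the
# data-plane over the IPC socket.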
def O0o ( kv_pair ) :
global Oo
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
o0 = lisp . lisp_nat_traversal
iIIIIi = lisp . lisp_rloc_probing
if 50 - 50: I1Ii111 + ooOoO0o + iII111i
if 15 - 15: I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
if 41 - 41: I1ii11iIi11i
lispconfig . lisp_xtr_command ( kv_pair )
if 5 - 5: Oo0Ooo
if 100 - 100: Ii1I + iIii1I11I1II1
if 59 - 59: IiII
if 89 - 89: OoOoOO00 % iIii1I11I1II1
III11I1OOOO0o0O = ( o0 == False and lisp . lisp_nat_traversal and lisp . lisp_rloc_probing )
if 41 - 41: o0oOOo0O0Ooo + ooOoO0o
O00O00OoO = ( iIIIIi == False and lisp . lisp_rloc_probing )
if 20 - 20: O0 - OoooooooOO - IiII + iIii1I11I1II1
o0II1IIi1iII1i = 0
if ( O00O00OoO ) : o0II1IIi1iII1i = 1
if ( III11I1OOOO0o0O ) : o0II1IIi1iII1i = 5
if 26 - 26: O0
if ( o0II1IIi1iII1i != 0 ) :
iiiIi = [ Oo , Oo ]
lisp . lisp_start_rloc_probe_timer ( o0II1IIi1iII1i , iiiIi )
if 62 - 62: O0 . Oo0Ooo
if 33 - 33: Oo0Ooo / iIii1I11I1II1 % i1IIi
if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
if 49 - 49: IiII / ooOoO0o / OOooOOo
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
if ( lisp . lisp_crypto_ephem_port == None and lisp . lisp_data_plane_security ) :
OOOoO = Oo . getsockname ( ) [ 1 ]
lisp . lisp_crypto_ephem_port = OOOoO
lisp . lprint ( "Use port {} for lisp-crypto packets" . format ( OOOoO ) )
O0oooOoO = { "type" : "itr-crypto-port" , "port" : OOOoO }
lisp . lisp_write_to_dp_socket ( O0oooOoO )
if 62 - 62: OOooOOo / II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
if 20 - 20: OOooOOo * II111iiii - OoOoOO00 - oO0o * I1Ii111
if 6 - 6: ooOoO0o + OOooOOo / Oo0Ooo + IiII % II111iiii / OoO0O00
if 45 - 45: OoooooooOO
if 9 - 9: I11i . OoO0O00 * i1IIi . OoooooooOO
if 32 - 32: OoOoOO00 . I1ii11iIi11i % I1IiiI - II111iiii
if 11 - 11: O0 + I1IiiI
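# Handles "nonce%" IPC messages from the companion ETR process: "R" starts
# echo-nonce mode for the RLOC using the received request-nonce, "E" records
# an echoed nonce and, when it matches the one we sent, stops request-nonce
# mode for that RLOC.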
def OO0OOoooo0o ( ipc ) :
IiIi1Ii , iiIIiI11II1 , oooOo , oOO0o00O = ipc . split ( "%" )
oOO0o00O = int ( oOO0o00O , 16 )
if 79 - 79: oO0o - II111iiii
Ii1iiI1 = lisp . lisp_get_echo_nonce ( None , oooOo )
if ( Ii1iiI1 == None ) : Ii1iiI1 = lisp . lisp_echo_nonce ( oooOo )
if 76 - 76: Ii1I * iIii1I11I1II1
if 31 - 31: i11iIiiIii + OOooOOo - O0
if 51 - 51: OoO0O00 * i1IIi / Ii1I * OOooOOo + ooOoO0o % I1ii11iIi11i
if 34 - 34: oO0o * OoooooooOO + Ii1I + i11iIiiIii
if 22 - 22: i1IIi
if ( iiIIiI11II1 == "R" ) :
Ii1iiI1 . request_nonce_rcvd = oOO0o00O
Ii1iiI1 . last_request_nonce_rcvd = lisp . lisp_get_timestamp ( )
Ii1iiI1 . echo_nonce_sent = oOO0o00O
Ii1iiI1 . last_new_echo_nonce_sent = lisp . lisp_get_timestamp ( )
lisp . lprint ( "Start echo-nonce mode for {}, nonce 0x{}" . format ( lisp . red ( Ii1iiI1 . rloc_str , False ) , lisp . lisp_hex_string ( oOO0o00O ) ) )
if 24 - 24: I11i / I1IiiI * i1IIi % OoooooooOO
if 99 - 99: i11iIiiIii . II111iiii . OoooooooOO
if 59 - 59: i11iIiiIii . OoooooooOO / I11i * I1ii11iIi11i + OoooooooOO
if ( iiIIiI11II1 == "E" ) :
Ii1iiI1 . echo_nonce_rcvd = oOO0o00O
Ii1iiI1 . last_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
if 3 - 3: i11iIiiIii * Oo0Ooo % iIii1I11I1II1 % I1IiiI * iII111i / OOooOOo
if ( Ii1iiI1 . request_nonce_sent == oOO0o00O ) :
O00oo00oOOO0o = lisp . bold ( "echoed nonce" , False )
lisp . lprint ( "Received {} {} from {}" . format ( O00oo00oOOO0o ,
lisp . lisp_hex_string ( oOO0o00O ) ,
lisp . red ( Ii1iiI1 . rloc_str , False ) ) )
if 5 - 5: o0oOOo0O0Ooo / I1IiiI % Ii1I . IiII
Ii1iiI1 . request_nonce_sent = None
lisp . lprint ( "Stop request-nonce mode for {}" . format ( lisp . red ( Ii1iiI1 . rloc_str , False ) ) )
if 86 - 86: i1IIi * OoOoOO00 . O0 - Ii1I - o0oOOo0O0Ooo - OoOoOO00
Ii1iiI1 . last_good_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
else :
i11IiI = "none"
if ( Ii1iiI1 . request_nonce_sent ) :
i11IiI = lisp . lisp_hex_string ( Ii1iiI1 . request_nonce_sent )
if 93 - 93: i1IIi . IiII / I1IiiI + IiII
lisp . lprint ( ( "Received echo-nonce 0x{} from {}, but request-" + "nonce is {}" ) . format ( lisp . lisp_hex_string ( oOO0o00O ) ,
lisp . red ( Ii1iiI1 . rloc_str , False ) , i11IiI ) )
if 59 - 59: OoO0O00 - OoO0O00 + iII111i
if 32 - 32: i1IIi / Oo0Ooo - O0
return
if 85 - 85: Ii1I - O0 * i11iIiiIii . i1IIi
if 20 - 20: iII111i / OOooOOo
if 28 - 28: ooOoO0o * I11i % i11iIiiIii * iII111i / Ii1I
if 41 - 41: OOooOOo - o0oOOo0O0Ooo + Ii1I
if 15 - 15: I11i / o0oOOo0O0Ooo + Ii1I
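# Command dispatch table: maps lisp.config clause names to handler functions
# and their allowed sub-parameters (with value ranges where applicable).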
O0oo00o = {
"lisp xtr-parameters" : [ O0o , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"multi-tenant-eid" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-device" : [ True ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ i1I1Ii11i , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-server" : [ lispconfig . lisp_map_server_command , {
"ms-name" : [ True ] ,
"address" : [ True ] ,
"dns-name" : [ True ] ,
"authentication-type" : [ False , "sha1" , "sha2" ] ,
"authentication-key" : [ False ] ,
"encryption-key" : [ False ] ,
"proxy-reply" : [ False , "yes" , "no" ] ,
"want-map-notify" : [ False , "yes" , "no" ] ,
"merge-registrations" : [ False , "yes" , "no" ] ,
"refresh-registrations" : [ False , "yes" , "no" ] ,
"site-id" : [ False , 1 , 0xffffffffffffffff ] } ] ,
"lisp database-mapping" : [ OoOooOO0oOOo0O , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"register-ttl" : [ True , 1 , 0xffffffff ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"subscribe-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp itr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
"geo-name" : [ False ] ,
"geo-tag" : [ False ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"show itr-map-cache" : [ Ii1 , { } ] ,
"show itr-rloc-probing" : [ IIII , { } ] ,
"show itr-keys" : [ oo , { } ] ,
"show itr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
if 45 - 45: I1ii11iIi11i + I1Ii111 . iII111i . iII111i
if 34 - 34: OoO0O00 % o0oOOo0O0Ooo % I1IiiI
if 3 - 3: OoooooooOO * I1IiiI * oO0o - IiII - ooOoO0o
if 21 - 21: OoooooooOO - I1ii11iIi11i . OoOoOO00
if 90 - 90: iIii1I11I1II1
if 56 - 56: OOooOOo
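# Main entry: run the ITR startup routine, then select() over the control,
# data and IPC sockets, dispatching RLOC-probe/control packets, CLI commands,
# API requests and punted data packets; on exit close the sockets and print
# the exit banner.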
if ( O00o0OO0 ( ) == False ) :
lisp . lprint ( "lisp_itr_startup() failed" )
lisp . lisp_print_banner ( "ITR abnormal exit" )
exit ( 1 )
if 49 - 49: ooOoO0o . II111iiii
if 24 - 24: O0 . OoooooooOO - OoO0O00 * OoooooooOO
Ii11iiI = [ Oo , OOO0o0o ,
I1Ii11I1Ii1i , Ii1iI ]
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
if 7 - 7: o0oOOo0O0Ooo % IiII * OoOoOO00
if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
II1iII1i1i = True
o00oO0O0oo0o = [ Oo ] * 3
iIi11I11 = [ I1Ii11I1Ii1i ] * 3
if 40 - 40: iIii1I11I1II1
while ( True ) :
try : oOoOo0o00o , iIIi1 , IiIi1Ii = select . select ( Ii11iiI , [ ] , [ ] )
except : break
if 68 - 68: Oo0Ooo
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if ( lisp . lisp_ipc_data_plane and Ii1iI in oOoOo0o00o ) :
lisp . lisp_process_punt ( Ii1iI , Oo0o ,
Ooo )
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if 94 - 94: i1IIi
if 36 - 36: I1IiiI + Oo0Ooo
if 46 - 46: iII111i
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
if ( Oo in oOoOo0o00o ) :
iiIIiI11II1 , IIi1IIIIi , OOOoO , I1i1I11111iI1 = lisp . lisp_receive ( o00oO0O0oo0o [ 0 ] ,
False )
if ( IIi1IIIIi == "" ) : break
if 32 - 32: I1IiiI + I1ii11iIi11i - oO0o + I1ii11iIi11i / i1IIi * oO0o
if ( lisp . lisp_is_rloc_probe_reply ( I1i1I11111iI1 [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 90 - 90: Ii1I % oO0o
lisp . lisp_parse_packet ( o00oO0O0oo0o , I1i1I11111iI1 , IIi1IIIIi , OOOoO )
if 6 - 6: OoooooooOO / i11iIiiIii / I1Ii111
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
if 64 - 64: i1IIi
if ( I1Ii11I1Ii1i in oOoOo0o00o ) :
iiIIiI11II1 , IIi1IIIIi , OOOoO , I1i1I11111iI1 = lisp . lisp_receive ( iIi11I11 [ 0 ] ,
False )
if ( IIi1IIIIi == "" ) : break
if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
if ( lisp . lisp_is_rloc_probe_reply ( I1i1I11111iI1 [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 25 - 25: II111iiii / OoO0O00
oo0OoOO0000 = lisp . lisp_parse_packet ( iIi11I11 , I1i1I11111iI1 , IIi1IIIIi , OOOoO )
if 2 - 2: Ii1I * I1ii11iIi11i * OoooooooOO
if 73 - 73: OoOoOO00 + Oo0Ooo
if 61 - 61: iIii1I11I1II1
if 47 - 47: OoooooooOO
if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
if ( oo0OoOO0000 ) :
iiiIi = [ Oo , Oo ]
lisp . lisp_start_rloc_probe_timer ( 0 , iiiIi )
if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
if 2 - 2: I1Ii111 - I1ii11iIi11i + o0oOOo0O0Ooo * OoO0O00 / iII111i
if 26 - 26: OOooOOo * Oo0Ooo
if 31 - 31: I11i * oO0o . Ii1I
if 35 - 35: I11i
if ( OOO0o0o in oOoOo0o00o ) :
iiIIiI11II1 , IIi1IIIIi , OOOoO , I1i1I11111iI1 = lisp . lisp_receive ( OOO0o0o , True )
if 94 - 94: ooOoO0o / i11iIiiIii % O0
if ( IIi1IIIIi == "" ) : break
if 70 - 70: I11i - Oo0Ooo / OoooooooOO % OoooooooOO
if ( iiIIiI11II1 == "command" ) :
if ( I1i1I11111iI1 == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 95 - 95: OoooooooOO % OoooooooOO . Ii1I
if ( I1i1I11111iI1 . find ( "nonce%" ) != - 1 ) :
OO0OOoooo0o ( I1i1I11111iI1 )
continue
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
lispconfig . lisp_process_command ( OOO0o0o , iiIIiI11II1 ,
I1i1I11111iI1 , "lisp-itr" , [ O0oo00o ] )
elif ( iiIIiI11II1 == "api" ) :
lisp . lisp_process_api ( "lisp-itr" , OOO0o0o , I1i1I11111iI1 )
elif ( iiIIiI11II1 == "data-packet" ) :
III ( I1i1I11111iI1 , "ipc" )
else :
if ( lisp . lisp_is_rloc_probe_reply ( I1i1I11111iI1 [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe request, using pcap" )
continue
if 68 - 68: O0
lisp . lisp_parse_packet ( Oo0o , I1i1I11111iI1 , IIi1IIIIi , OOOoO )
if 76 - 76: I1ii11iIi11i
if 99 - 99: o0oOOo0O0Ooo
if 1 - 1: Ii1I * OoOoOO00 * OoO0O00 + Oo0Ooo
if 90 - 90: I1Ii111 % Oo0Ooo - Oo0Ooo . iIii1I11I1II1 / OOooOOo + I11i
if 89 - 89: oO0o
IiI11i1IIiiI ( )
lisp . lisp_print_banner ( "ITR normal exit" )
exit ( 0 )
if 87 - 87: iII111i % Oo0Ooo
if 62 - 62: OoO0O00 + ooOoO0o / iII111i * i11iIiiIii
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
softmax_mpi.py
|
#!/usr/bin/env python
"""
DMLC submission script, MPI version
"""
import argparse
import sys
import os
import subprocess
import tracker
from threading import Thread
parser = argparse.ArgumentParser(description='DMLC script to submit dmlc job using MPI')
parser.add_argument('-n', '--nworker', required=True, type=int,
                    help = 'number of worker processes to be launched')
parser.add_argument('-s', '--server-nodes', default = 0, type=int,
help = 'number of server nodes to be launched')
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help = 'logging level')
parser.add_argument('--log-file', type=str,
help = 'output log to the specific log file')
parser.add_argument('-S', '--serverhost', type=str,
help = 'the hostfile of mpi server')
parser.add_argument('-W', '--workerhost', type=str,
help = 'the hostfile of mpi worker')
parser.add_argument('command', nargs='+',
help = 'command for dmlc program')
parser.add_argument('--host-ip', type=str,
help = 'the scheduler ip', default='ip')
args, unknown = parser.parse_known_args()
#
# submission script using MPI
#
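# Example invocation (hosts, counts and the program path are illustrative
# only):
#   ./softmax_mpi.py -n 4 -s 2 -S server.hosts -W worker.hosts ./softmax.dmlc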
def get_mpi_env(envs):
"""get the mpirun command for setting the envornment
support both openmpi and mpich2
"""
outfile="/tmp/mpiver"
os.system("mpirun -version 1>/tmp/mpiver 2>/tmp/mpiver")
with open (outfile, "r") as infile:
mpi_ver = infile.read()
cmd = ''
if 'Open MPI' in mpi_ver:
for k, v in envs.items():
cmd += ' -x %s=%s' % (k, str(v))
elif 'mpich' in mpi_ver:
for k, v in envs.items():
cmd += ' -env %s %s' % (k, str(v))
else:
        raise Exception('unknown MPI version %s' % (mpi_ver))
return cmd
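# For reference, the flags produced above take the following form (the
# key/value pairs are illustrative):
#   Open MPI: -x DMLC_ROLE=server -x SYNC_MODE=3
#   MPICH:    -env DMLC_ROLE server -env SYNC_MODE 3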
def mpi_submit(nworker, nserver, pass_envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
nworker number of slave process to start up
nserver number of server nodes to start up
pass_envs enviroment variables to be added to the starting programs
"""
def run(prog):
""""""
subprocess.check_call(prog, shell = True)
scmd = ''
if args.serverhost is not None:
scmd = '--hostfile %s' % (args.serverhost)
scmd += ' ' + ' '.join(args.command) + ' ' + ' '.join(unknown)
wcmd = ''
if args.workerhost is not None:
wcmd = '--hostfile %s' % (args.workerhost)
wcmd += ' ' + ' '.join(args.command) + ' ' + ' '.join(unknown)
"""
SYNC_MODE:
1: sync
2: semi-sync without vr
3: semi-sync with vr
"""
pass_envs['EVAL'] = 1
pass_envs['EVAL_FILE'] = '/home/cx2/ClionProjects/ps-lite-new/examples/LR_proximal/script/a9a-data/softmax_weight_track_sync3_20170324193853'
pass_envs['SYNC_MODE'] = 3
pass_envs['TRAIN_DIR'] = '/home/cx2/ClionProjects/ps-lite-new/examples/LR_proximal/script/a9a-data/train/part-'
pass_envs['TEST_FILE'] = '/home/cx2/ClionProjects/ps-lite-new/examples/LR_proximal/script/a9a-data/test/part-001'
pass_envs['NSAMPLES'] = 32561
pass_envs['NUM_CLASS'] = 2
pass_envs['NUM_FEATURE_DIM'] = 123
pass_envs['NUM_ITERATION'] = 20
pass_envs['SAVE_PREFIX'] = '/home/cx2/ClionProjects/ps-lite-new/examples/LR_proximal/script/a9a-data/softmax_weight_track_'
pass_envs['PROXIMAL'] = 'l1'
pass_envs['LAMBDA'] = 0.01
pass_envs['TAU'] = 400
pass_envs['GD_DELAY_MSG'] = 10
pass_envs['GD_RESEND_DELAY'] = 1000
# start servers
if nserver > 0:
pass_envs['LEARNING_RATE'] = 0.1
pass_envs['DMLC_ROLE'] = 'server'
prog = 'mpirun -n %d %s %s' % (nserver, get_mpi_env(pass_envs), scmd)
thread = Thread(target = run, args=(prog,))
        thread.daemon = True
thread.start()
if nworker > 0:
pass_envs['BATCH_SIZE'] = 100
pass_envs['DMLC_ROLE'] = 'worker'
prog = 'mpirun -n %d %s %s' % (nworker, get_mpi_env(pass_envs), wcmd)
thread = Thread(target = run, args=(prog,))
        thread.daemon = True
thread.start()
tracker.config_logger(args)
tracker.submit(args.nworker, args.server_nodes, fun_submit = mpi_submit,
hostIP=args.host_ip,
pscmd=(' '.join(args.command) + ' ' + ' '.join(unknown)))
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import inspect
import itertools
import os
import queue
import random
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, ExcessiveFee, PrintError, UserCancelled, profiler, format_satoshis, format_time,
finalization_print_error, to_string)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
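# Illustrative sketch (comment only, not executed) of how the two fee helpers
# above are typically consumed; `network` is assumed to be a Network instance
# (or None when offline) and the amounts are placeholders:
#
#   fee_per_kb = relayfee(network)   # relay fee in sat/kB, capped at MAX_RELAY_FEE
#   dust = dust_threshold(network)   # currently the hard-coded 546 sat BCH threshold
#   if change_amount < dust:
#       pass  # change below the dust threshold is folded into the tx fee instead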
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
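# Illustrative sketch (comment only, not executed) of sweeping private keys
# with sweep(); `network`, `config`, the WIF strings and the destination
# address below are placeholders/assumptions:
#
#   privkeys = ['<WIF key 1>', '<WIF key 2>']
#   recipient = Address.from_string('<destination address>')
#   tx = sweep(privkeys, network, config, recipient)  # fee=None -> estimated from tx size
#   # tx is fully signed at this point and can be broadcast via the wallet's
#   # Network object; on failure, NotEnoughFunds or ValueError is raised above.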
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
# address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep a set of the change and receiving addresses so that is_mine()
# checks are O(logN) rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other; a 'spendable' coin is one that is
# not frozen at EITHER level.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
self.change_reserved = set(Address.from_string(a) for a in storage.get('change_reserved', ()))
self.change_reserved_default = [Address.from_string(a) for a in storage.get('change_reserved_default', ())]
self.change_unreserved = [Address.from_string(a) for a in storage.get('change_unreserved', ())]
self.change_reserved_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
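# Illustrative round-trip sketch (comment only) for the two classmethod
# converters above; the address string shown is a placeholder:
#
#   d_str  = {'<cashaddr or legacy address string>': 'some label'}
#   d_addr = Abstract_Wallet.to_Address_dict(d_str)     # keys become Address objects
#   d_back = Abstract_Wallet.from_Address_dict(d_addr)  # keys back to storage strings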
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = saved_der # restore the lost derivation to the keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def save_change_reservations(self):
with self.lock:
self.storage.put('change_reserved_default', [a.to_storage_string() for a in self.change_reserved_default])
self.storage.put('change_reserved', [a.to_storage_string() for a in self.change_reserved])
unreserved = self.change_unreserved + list(self.change_reserved_tmp)
self.storage.put('change_unreserved', [a.to_storage_string() for a in unreserved])
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text = None):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def invalidate_address_set_cache(self):
''' This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient.'''
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
''' Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method! '''
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do a 2 x O(logN) lookup using sets rather than 2 x O(N) lookups
# if we were to use the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses, it starts to add up since is_mine() is called frequently
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove from the unverified map and add to the verified map and
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
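# Illustrative sketch (comment only) of interpreting get_tx_height() results;
# `wallet` and `tx_hash` are assumed to exist:
#
#   height, conf, timestamp = wallet.get_tx_height(tx_hash)
#   if conf:
#       ...  # SPV-verified with `conf` confirmations; timestamp is from the block header
#   elif height > 0:
#       ...  # the server reports a height, but the tx is not SPV-verified yet
#   else:
#       ...  # unconfirmed, or unknown to this wallet (0, 0, 0)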
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience method equivalent to Blockchain.get_hash(height), except our
version returns None instead of NULL_HASH_HEX on a 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"""Effect of tx on address."""
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
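# Illustrative sketch (comment only) of unpacking the WalletDelta returned by
# get_wallet_delta(); `wallet` and `tx` are assumed to exist:
#
#   d = wallet.get_wallet_delta(tx)
#   if d.is_relevant and d.is_mine:
#       print("net effect on wallet:", d.v, "fee:", d.fee)  # fee is None when unknown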
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status = _('Partially signed') + ' (%d/%d)' % (s, r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
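# Illustrative sketch (comment only) of reading the balance tuple returned by
# get_addr_balance(); `wallet` and `addr` are assumed to exist:
#
#   c, u, x = wallet.get_addr_balance(addr)
#   spendable_now = c        # confirmed and matured
#   pending       = u        # unconfirmed (mempool) delta; can be negative
#   maturing      = x        # coinbase outputs still maturing
#   total         = c + u + x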
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if isInvoice:
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
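# Illustrative sketch (comment only) of calling get_utxos(); the keyword
# values mirror what get_spendable_coins() passes and are assumptions:
#
#   addrs_seen = set()
#   coins = wallet.get_utxos(exclude_frozen=True, mature=True,
#                            confirmed_only=False, exclude_slp=True,
#                            addr_set_out=addrs_seen)
#   # each coin is a dict with 'address', 'value', 'prevout_hash', 'prevout_n',
#   # 'height', 'coinbase', 'is_frozen_coin' and 'slp_token' keys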
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs! Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
''' Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets. '''
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txin):
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
ser = prevout_hash + ':%d'%prevout_n
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _, addr, _ in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
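# Illustrative sketch (comment only) of consuming get_history(): each entry is
# a TxHistory namedtuple carrying the per-tx delta and a running balance,
# oldest first unless reverse=True. `wallet` is assumed to exist:
#
#   for h in wallet.get_history():
#       print(h.tx_hash, h.height, h.conf, h.amount, h.balance)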
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
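# Illustrative sketch (comment only) of calling export_history(); the lambda
# progress callback is an assumption for illustration:
#
#   rows = wallet.export_history(show_addresses=True,
#                                progress_callback=lambda frac: None)
#   # each row is a plain dict ('txid', 'height', 'confirmations', 'timestamp',
#   # 'value', 'fee', 'balance', 'date', 'label', ...), ready for JSON/CSV export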
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead if it's less than 1.0 sats/B we flag it as low_fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def reserve_change_addresses(self, count, temporary=False):
""" Reserve and return `count` change addresses. In order
of preference, this will return from:
1. addresses 'freed' by `.unreserve_change_address`,
2. addresses in the last 20 (gap limit) of the change list,
3. newly-created addresses.
Of these, only unlabeled, unreserved addresses with no usage history
will be returned. If you pass temporary=False (default), this will
persist upon wallet saving, otherwise with temporary=True the address
will be made available again once the wallet is re-opened.
On non-deterministic wallets, this returns an empty list.
"""
if count <= 0 or not hasattr(self, 'create_new_address'):
return []
with self.lock:
last_change_addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if not last_change_addrs:
# this happens in non-deterministic wallets but the above
# hasattr check should have caught those.
return []
def gen_change():
try:
while True:
yield self.change_unreserved.pop(0)
except IndexError:
pass
for addr in last_change_addrs:
yield addr
while True:
yield self.create_new_address(for_change=True)
result = []
for addr in gen_change():
if ( addr in self.change_reserved
or addr in self.change_reserved_tmp
or self.get_num_tx(addr) != 0
or addr in result):
continue
addr_str = addr.to_storage_string()
if self.labels.get(addr_str):
continue
result.append(addr)
if temporary:
self.change_reserved_tmp.add(addr)
else:
self.change_reserved.add(addr)
if len(result) >= count:
return result
raise RuntimeError("Unable to generate new addresses") # should not happen
def unreserve_change_address(self, addr):
""" Unreserve an addr that was set by reserve_change_addresses, and
also explicitly reschedule this address to be usable by a future
reservation. Unreserving is appropriate when the address was never
actually shared or used in a transaction, and reduces empty gaps in
the change list.
"""
assert addr in self.get_change_addresses()
with self.lock:
self.change_reserved.discard(addr)
self.change_reserved_tmp.discard(addr)
self.change_unreserved.append(addr)
def get_default_change_addresses(self, count):
""" Return `count` change addresses from the default reserved list,
ignoring and removing used addresses. Reserves more as needed.
The same default change addresses keep getting repeated until they are
actually seen as used in a transaction from the network. Theoretically
this could hurt privacy if the user has multiple unsigned transactions
open at the same time, but practically this avoids address gaps for
normal usage. If you need non-repeated addresses, see
`reserve_change_addresses`.
On non-deterministic wallets, this returns an empty list.
"""
result = []
with self.lock:
for addr in list(self.change_reserved_default):
if len(result) >= count:
break
if self.get_num_tx(addr) != 0:
self.change_reserved_default.remove(addr)
continue
result.append(addr)
need_more = count - len(result)
if need_more > 0:
new_addrs = self.reserve_change_addresses(need_more)
self.change_reserved_default.extend(new_addrs)
result.extend(new_addrs)
return result
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
change_addrs = []
if change_addr:
change_addrs = [change_addr]
else:
# Currently the only code that uses this hook is the deprecated
# Cash Shuffle plugin
change_addrs = run_hook("get_change_addrs", self) or []
if not change_addrs:
# hook gave us nothing, so find a change addr from the change
# reservation subsystem
max_change = self.max_change_outputs if self.multiple_change else 1
if self.use_change:
change_addrs = self.get_default_change_addresses(max_change)
else:
change_addrs = []
if not change_addrs:
# For some reason we couldn't get any autogenerated change
# address (non-deterministic wallet?). So, try to find an
# input address that belongs to us.
for inp in inputs:
backup_addr = inp['address']
if self.is_mine(backup_addr):
change_addrs = [backup_addr]
break
else:
# ok, none of the inputs are "mine" (why?!) -- fall back
# to picking first max_change change_addresses that have
# no history
change_addrs = []
for addr in self.get_change_addresses()[-self.gap_limit_for_change:]:
if self.get_num_tx(addr) == 0:
change_addrs.append(addr)
if len(change_addrs) >= max_change:
break
if not change_addrs:
# No unused wallet addresses or no change addresses.
# Fall back to picking ANY wallet address
try:
# Pick a random address
change_addrs = [random.choice(self.get_addresses())]
except IndexError:
change_addrs = [] # Address-free wallet?!
# This should never happen
if not change_addrs:
raise RuntimeError("Can't find a change address!")
assert all(isinstance(addr, Address) for addr in change_addrs)
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs,
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes=tx.estimated_size()
fee_in_satoshis=tx.get_fee()
sats_per_byte=fee_in_satoshis/tx_in_bytes
if (sats_per_byte > 50):
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
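    # Hedged example of the `outputs` format expected by mktx()/make_unsigned_transaction()
    # (illustrative values only; `wallet`, `config` and `password` are assumed to
    # come from the caller, and '<dest address>' is a placeholder):
    #
    #   outputs = [(TYPE_ADDRESS, Address.from_string('<dest address>'), 100000)]
    #   tx = wallet.mktx(outputs, password, config)
    #
    # Setting the amount to '!' in at most one output spends the maximum available,
    # as handled by make_unsigned_transaction() above.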
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
''' Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable. '''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary = False):
'''Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
        temporary and the permanent frozen coin sets in a single call. Thus after
        a call to `set_frozen_coin_state(utxos, False)`, both the temporary and
        the persistent frozen sets are cleared of all coins in `utxos`. '''
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add( utxo )
def discard(utxo):
self.frozen_coins.discard( utxo )
self.frozen_coins_tmp.discard( utxo )
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict) and self.is_mine(utxo['address']):
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
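    # Hedged sketch of coin-level freezing (names are illustrative; assumes a
    # synced `wallet` and coin dicts as returned by get_utxos()):
    #
    #   coins = wallet.get_utxos()
    #   wallet.set_frozen_coin_state(coins[:1], True, temporary=True)  # ephemeral, not saved
    #   assert wallet.is_frozen_coin(coins[0])
    #   wallet.set_frozen_coin_state(coins[:1], False)  # clears both the temp and permanent sets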
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network) # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
            # Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.save_change_reservations()
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None):
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
                    break # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False, ndata=None):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
if ndata: # used for reusable paycodes
# test if keystore sign_transaction method knows about the `ndata` kwarg
if 'ndata' in inspect.signature(k.sign_transaction, follow_wrapped=True).parameters:
# keystore understands the optional `ndata` kwarg
k.sign_transaction(tx, password, use_cache=use_cache, ndata=ndata)
else:
# keystore does not understand `ndata` (possibly because hw wallet)
raise RuntimeError("Keystore does not understand ndata parameter. Possibly wrong wallet type attemping special operation.")
else: # regular normal operation
k.sign_transaction(tx, password, use_cache=use_cache)
except UserCancelled:
continue
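    # Hedged usage note for the sighash cache described in the docstring above
    # (illustrative only; `wallet`, `tx` and `password` are assumed):
    #
    #   wallet.sign_transaction(tx, password, use_cache=True)
    #
    # The cache keeps signing O(N + M) for N inputs / M outputs, but the tx must
    # not be modified (other than adding signatures) once caching has begun.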
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
amount_text = format_satoshis(r['amount'])
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
if not 'index_url' in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
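    # Hedged sketch of how the returned status might be consumed (PR_* are the
    # module's payment-request constants; `wallet` and `addr` are assumed):
    #
    #   status, conf = wallet.get_request_status(addr)
    #   if status == PR_PAID:
    #       print('paid with', conf, 'confirmations')
    #   elif status == PR_EXPIRED:
    #       print('request expired')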
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
            # See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.change_reserved.clear()
self.change_reserved_default.clear()
self.change_unreserved.clear()
self.change_reserved_tmp.clear()
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_change_reservations()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
        ''' Returns True if this wallet type is compatible with Schnorr signing.
        `reason` is an optional list into which a translated string explaining
        why Schnorr isn't possible is placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
# time has passed that air-gapped systems are unlikely to not
# have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
        ''' Enable Schnorr signing for this wallet. Note that if Schnorr is not
        possible (due to missing libs or an incompatible wallet type),
        is_schnorr_enabled() will still return False after calling this
        function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
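# Hedged illustration of the per-wallet Schnorr switch above (the `wallet`
# object is assumed; storage values 0 and 2 map to off/on, with 1 reserved for
# a future 'ask per tx' mode as noted in set_schnorr_enabled):
#
#   wallet.set_schnorr_enabled(True)
#   assert wallet.is_schnorr_enabled() or not wallet.is_schnorr_possible()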
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
                    # in memory. This allows the user to re-add the private key
                    # later without the UI erroneously saying "not verified".
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
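def _example_import_wallets(tmp_dir):
    # Hedged, illustrative sketch only: shows how the two imported-wallet classes
    # above are built from text. The path and the '<...>' placeholders are
    # hypothetical and must be replaced with real data by the caller.
    watch_only = ImportedAddressWallet.from_text(
        WalletStorage(os.path.join(tmp_dir, 'watch_only')),
        '<cash address 1> <cash address 2>')
    spending = ImportedPrivkeyWallet.from_text(
        WalletStorage(os.path.join(tmp_dir, 'imported_keys')),
        '<WIF private key>', password=None)
    return watch_only, spending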
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
        return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
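def _example_register_custom_wallet_type():
    # Hedged sketch of the extension hooks above ('mycustom' is a hypothetical
    # type name, not part of the codebase): plugins advertise a wallet type and
    # its constructor so the Wallet factory can build it from storage.
    class MyCustomWallet(Standard_Wallet):
        wallet_type = 'mycustom'
    register_wallet_type('mycustom')
    register_constructor('mycustom', MyCustomWallet)
    return Wallet.wallet_class('mycustom')  # -> MyCustomWallet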
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
    elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
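def _example_create_and_restore(tmp_dir, config):
    # Hedged sketch tying the two helpers above together. `tmp_dir` and `config`
    # are assumed to be supplied by the caller; nothing here is part of the
    # original module.
    created = create_new_wallet(path=os.path.join(tmp_dir, 'new_wallet'),
                                config=config, password='s3cret',
                                encrypt_file=True)
    restored = restore_wallet_from_text(created['seed'],
                                        path=os.path.join(tmp_dir, 'restored'),
                                        config=config, password='s3cret')
    return created['wallet'], restored['wallet']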
|
pool.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler
from six.moves.socketserver import ThreadingUnixStreamServer
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr.lib._i18n import _
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
LOG = logging.getLogger(__name__)
pool_manager_opts = [
oslo_cfg.StrOpt('sock_file',
help=_("Absolute path to socket file that "
"will be used for communication with "
"the Pool Manager daemon"),
default='/run/kuryr/kuryr_manage.sock'),
]
oslo_cfg.CONF.register_opts(pool_manager_opts, "pool_manager")
class UnixDomainHttpServer(ThreadingUnixStreamServer):
pass
class RequestHandler(BaseHTTPRequestHandler):
protocol = "HTTP/1.0"
def do_POST(self):
content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length)
params = dict(jsonutils.loads(body))
if self.path.endswith(constants.VIF_POOL_POPULATE):
trunk_ips = params.get('trunks', None)
num_ports = params.get('num_ports', 1)
if trunk_ips:
try:
self._create_subports(num_ports, trunk_ips)
except Exception:
response = ('Error while populating pool {0} with {1} '
'ports.'.format(trunk_ips, num_ports))
else:
response = ('Ports pool at {0} was populated with {1} '
'ports.'.format(trunk_ips, num_ports))
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Trunk port IP(s) missing.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
elif self.path.endswith(constants.VIF_POOL_FREE):
trunk_ips = params.get('trunks', None)
if not trunk_ips:
pool = "all"
else:
pool = trunk_ips
try:
self._delete_subports(trunk_ips)
except Exception:
response = 'Error freeing ports pool: {0}.'.format(pool)
else:
response = 'Ports pool belonging to {0} was freed.'.format(
pool)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Method not allowed.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
def do_GET(self):
content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length)
params = dict(jsonutils.loads(body))
if self.path.endswith(constants.VIF_POOL_LIST):
try:
pools_info = self._list_pools()
except Exception:
response = 'Error listing the pools.'
else:
response = 'Pools:\n{0}'.format(pools_info)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
elif self.path.endswith(constants.VIF_POOL_SHOW):
raw_key = params.get('pool_key', None)
if len(raw_key) != 3:
response = ('Invalid pool key. Proper format is:\n'
'[trunk_ip, project_id, [security_groups]]\n')
else:
pool_key = (raw_key[0], raw_key[1], tuple(sorted(raw_key[2])))
try:
pool_info = self._show_pool(pool_key)
except Exception:
response = 'Error showing pool: {0}.'.format(pool_key)
else:
response = 'Pool {0} ports are:\n{1}'.format(pool_key,
pool_info)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Method not allowed.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
def _create_subports(self, num_ports, trunk_ips):
try:
drv_project = drivers.PodProjectDriver.get_instance()
drv_subnets = drivers.PodSubnetsDriver.get_instance()
drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
project_id = drv_project.get_project({})
security_groups = drv_sg.get_security_groups({}, project_id)
subnets = drv_subnets.get_subnets([], project_id)
except TypeError as ex:
LOG.error("Invalid driver type")
raise ex
for trunk_ip in trunk_ips:
try:
drv_vif_pool.force_populate_pool(
trunk_ip, project_id, subnets, security_groups, num_ports)
except n_exc.Conflict as ex:
LOG.error("VLAN Id conflict (already in use) at trunk %s",
trunk_ip)
raise ex
except n_exc.NeutronClientException as ex:
LOG.error("Error happened during subports addition at trunk: "
" %s", trunk_ip)
raise ex
def _delete_subports(self, trunk_ips):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
drv_vif_pool.free_pool(trunk_ips)
except TypeError as ex:
LOG.error("Invalid driver type")
raise ex
def _list_pools(self):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
available_pools = drv_vif_pool.list_pools()
except TypeError as ex:
LOG.error("Invalid driver type")
raise ex
pools_info = ""
for pool_key, pool_items in available_pools.items():
pools_info += (jsonutils.dumps(pool_key) + " has "
+ str(len(pool_items)) + " ports\n")
if pools_info:
return pools_info
return "There are no pools"
def _show_pool(self, pool_key):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
pool = drv_vif_pool.show_pool(pool_key)
except TypeError as ex:
LOG.error("Invalid driver type")
raise ex
if pool:
pool_info = ""
for pool_id in pool:
pool_info += str(pool_id) + "\n"
return pool_info
else:
return "Empty pool"
class PoolManager(object):
"""Manages the ports pool enabling population and free actions.
`PoolManager` runs on the Kuryr-kubernetes controller and allows to
populate specific pools with a given amount of ports. In addition, it also
allows to remove all the (unused) ports in the given pool(s), or from all
of the pool if none of them is specified.
"""
def __init__(self):
pool_manager = threading.Thread(target=self._start_kuryr_manage_daemon)
        pool_manager.daemon = True
pool_manager.start()
def _start_kuryr_manage_daemon(self):
LOG.info("Pool manager started")
server_address = oslo_cfg.CONF.pool_manager.sock_file
try:
os.unlink(server_address)
except OSError:
if os.path.exists(server_address):
raise
try:
httpd = UnixDomainHttpServer(server_address, RequestHandler)
httpd.serve_forever()
except KeyboardInterrupt:
pass
except Exception:
LOG.exception('Failed to start Pool Manager.')
httpd.socket.close()
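def _example_populate_pool(trunk_ip, num_ports=3):
    # Hedged, illustrative client sketch (not part of the original module): it
    # speaks plain HTTP/1.0 over the manager's unix socket, mirroring what the
    # RequestHandler above expects. Assumes the PoolManager daemon is running
    # and listening on the configured sock_file.
    import socket
    body = jsonutils.dumps({'trunks': [trunk_ip], 'num_ports': num_ports})
    request = ('POST {path} HTTP/1.0\r\n'
               'Content-Length: {length}\r\n\r\n'
               '{body}').format(path=constants.VIF_POOL_POPULATE,
                                length=len(body), body=body)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(oslo_cfg.CONF.pool_manager.sock_file)
        sock.sendall(request.encode())
        return sock.recv(4096).decode()
    finally:
        sock.close()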
|
test_operator_gpu.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import mxnet as mx
import numpy as np
import unittest
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose
from mxnet.base import MXNetError
from mxnet import autograd
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied
from common import run_in_spawned_process
from test_operator import *
from test_numpy_ndarray import *
from test_numpy_op import *
from test_numpy_interoperability import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_gluon_gpu import _test_bulking
from test_contrib_operator import test_multibox_target_op
from test_tvm_op import *
from test_extensions import *
from test_contrib_optimizer import test_adamw
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
del test_custom_op_fork #noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
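    # Reference computation: count sketch maps x to CS(x) with
    #   CS(x)[j] = sum over i with h(i) == j of s(i) * x(i),
    # which is exactly what the nested loops below accumulate into `a`.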
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
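    # The contrib FFT/iFFT operators store complex values interleaved along the
    # last axis as [re0, im0, re1, im1, ...]; the branches below rebuild numpy
    # complex arrays from that layout so np.fft can serve as the reference.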
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
            temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
            temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
def _make_ndarrays(input_list, ctx=mx.gpu(0)):
return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]
def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):
values_arr = [np.random.rand(*shape).astype(dtype) * 10. for shape in shapes]
mx_vals = _make_ndarrays(values_arr, ctx=ctx)
sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
# checks that operator is deterministic
assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())
ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)
@with_seed()
def test_multi_sum_sq():
min_nparam = 100
max_nparam = 120
min_dim = 50000
max_dim = 100000
max_ndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.gpu(0)]:
for dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]
            low_tol = ctx == mx.cpu(0) and dtype == 'float16'
            tol1 = 1e-3 if low_tol else 1e-5
            tol2 = 1e-6 if low_tol else 1e-7
check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)
def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):
weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]
lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.
wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.
eta = (np.random.rand() + 0.1)
eps = (np.random.rand() + 0.1) / 10000.
mx_w = _make_ndarrays(weights_arr, ctx=ctx)
mx_g = _make_ndarrays(grads_arr, ctx=ctx)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)
w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))
g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))
ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],
dtype='float32', ctx=ctx)
ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
rescale_grad = (np.random.rand() + 0.5) * 100.
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,
rescale_grad=rescale_grad)
ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)
ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)
ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)
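    # LARS reference update of the layer-wise learning rates:
    #   lr_i' = lr_i * eta * ||w_i|| / (||g_i|| + wd_i * ||w_i|| + eps),
    # falling back to the original lr_i when either norm is zero.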
for i in range(ref_w_l2norm.size):
_w = ref_w_l2norm[i]
_g = ref_g_l2norm[i]
if _w > 0.0 and _g > 0.0:
ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)
else:
ref_new_lrs[i] = lrs[i]
assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)
@with_seed()
def test_fast_lars():
min_nparam = 50
max_nparam = 60
maxdim = 10000
maxndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.cpu(0), mx.gpu(0)]:
for w_dtype in dtypes:
for g_dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
                low_tol = ctx == mx.cpu(0) and 'float16' in (w_dtype, g_dtype)
                tol1 = 1e-3 if low_tol else 1e-5
                tol2 = 1e-6 if low_tol else 1e-7
check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)
def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
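    # The 'preloaded' update variants exercised below take the learning rates
    # and weight decays as trailing NDArrays (mx_lrs, mx_wds) rather than as
    # scalar keyword lists; both code paths should yield identical updates,
    # which the assertions at the end of this function verify.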
weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
rescale_grad = (np.random.random() + 1.0)
mx_w = _make_ndarrays(weights_arr)
mx_g = _make_ndarrays(grads_arr)
mx_p_w = _make_ndarrays(weights_arr)
mx_p_g = _make_ndarrays(grads_arr)
lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))
wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))
if use_master_weights:
weights32_arr = [arr.astype('float32') for arr in weights_arr]
mx_w32 = _make_ndarrays(weights32_arr)
mx_p_w32 = _make_ndarrays(weights32_arr)
if momentum is None:
if use_master_weights:
mx.nd.multi_mp_sgd_update(
*_flatten_list(zip(mx_w, mx_g, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
mx.nd.preloaded_multi_mp_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
out = mx.nd.multi_sgd_update(
*_flatten_list(zip(mx_w, mx_g)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
if use_master_weights:
momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
out = mx.nd.multi_mp_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
else:
momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
mx.nd.multi_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
mx.nd.preloaded_multi_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
atol = 1e-2
else:
rtol = 1e-5
atol = 1e-6
_assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)
if momentum is not None:
_assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)
if use_master_weights:
_assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)
@with_seed()
def test_preloaded_multi_sgd():
dtypes = ['float16', 'float32']
momentums = [None, 0.9]
min_nparam = 5
max_nparam = 10
maxdim = 6
maxndim = 4
for dtype in dtypes:
use_master_weights_list = [False,] if dtype == 'float32' else [True, False]
for use_master_weights in use_master_weights_list:
for momentum in momentums:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
    # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
        # BatchNorm_v1 gpu (native GPU implementation)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
        # BatchNorm gpu (native implementation, cuDNN disabled)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
tol = 1e-1
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=tol)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
# kernel overlap. The non-cudnn conv op doesn't do this so is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
                # tol can be pretty high; we're looking for a large diff due to a garbaged workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@with_seed()
def test_convolution_multiple_streams():
for num_streams in [1, 2]:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print("Starting engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print("Finished engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
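# (With grad_req='add' the wgrad kernel must accumulate into the existing
#  gradient buffer, i.e. run with beta == 1.0 instead of overwriting it with
#  beta == 0.0, which is the configuration the returned algos can fail on.)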
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
                # Add the pad/stride params when they differ from the defaults, and
                # randomly include them even when they match, to exercise both paths.
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
# Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
# For those cases when all implementations should match- the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
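        # Worked example for note 3 (comment only): for the 1-D row
        # [1, 2, 3, 4] with kernel=(3,) and pad=(1,), the left-most output
        # window covers [pad, 1, 2], so
        #   count_include_pad=True  -> (0 + 1 + 2) / 3 = 1.0
        #   count_include_pad=False -> (1 + 2) / 2     = 1.5
        # whereas the 'v1' operator always divides by the full window size.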
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
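# A small, hedged reference (illustration only; not used by check_consistency) for the
# output-size arithmetic in the comments of test_pooling_full_2d above. With
# pooling_convention='full' the output extent is ceil((in + 2*pad - kernel) / stride) + 1,
# while 'valid' uses floor instead of ceil.
def _pool_output_size(in_size, kernel, pad, stride, convention='valid'):
    span = in_size + 2 * pad - kernel
    if convention == 'full':
        return -(-span // stride) + 1  # ceiling division
    return span // stride + 1
# e.g. _pool_output_size(10, 4, 1, 3, 'full') == 4 and _pool_output_size(10, 5, 2, 4, 'full') == 4,
# matching the o_h = o_w = 4 worked out above.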
@with_seed()
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)
check_consistency(sym, ctx_list)
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
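    # Copy mod1's parameters into mod2: unpack them from cell1's layout and re-pack them
    # into cell2's layout so both modules run the forward pass with identical weights.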
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
mx.test_utils.assert_allclose(mod1.get_outputs()[0], mod2.get_outputs()[0], rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
mx.test_utils.assert_allclose(args[bias_name], expected_bias)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
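    # Explanatory note: launch() takes the kernel arguments, the context, grid dims and block
    # dims; the extra trailing value below requests dynamic shared memory for the kernel's
    # `extern __shared__` buffer.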
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.copy()
x.grad[:] = 0
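    # Reference run: the same three tanh applications on a single device must yield the
    # same gradient as the cross-device chain above (copyto passes gradients through unchanged).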
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad)
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # With NMS enabled there is no exact CPU/GPU guarantee (ties and float rounding can
            # change which boxes survive), so only require substantial agreement.
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)
@with_seed()
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
            if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
            if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
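    # A long chain of cheap flip ops makes per-op dispatch overhead dominate the runtime,
    # so the effect of bulking (running whole segments of ops as one engine unit) is measurable.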
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym.bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
@with_seed()
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')
def test_bulking_operator_gpu():
_test_bulking(_test_bulking_in_process)
@unittest.skip('skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : seg_sizes[0],
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : seg_sizes[1],
'MXNET_EXEC_BULK_EXEC_TRAIN' : seg_sizes[2]},
time_per_iteration):
# skip test since the python version can't run it properly. Warning msg was logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
# Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
# slower than both half-bulked times[0,15,True] and times[15,0,True]
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
@with_seed()
def test_allclose_function_gpu():
allclose_function([mx.cpu(), mx.gpu(0)])
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
@with_seed()
def test_arange_like_dtype():
dtypes = [np.float16, np.float32, np.float64]
for t in dtypes:
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
mod = z.simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
@with_seed()
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
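    # Note (layout read off the reshapes above): the projection weights/biases are packed
    # head by head, with each head's q, k and v rows adjacent, which is the interleaved
    # layout the contrib interleaved_matmul_selfatt_* operators below consume.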
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=mx.gpu(0),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=mx.gpu(0),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_selfatt():
for dtype in ['float16', 'float32']:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
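    # Same head-wise interleaved packing as in the self-attention test above, but only for
    # k and v; q is projected separately and fed to interleaved_matmul_encdec_qk.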
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
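# Layout note: q and kv were transposed to (seq_len, batch, dim) above because
# the fused mx.sym.contrib.interleaved_matmul_encdec_* kernels below appear to
# operate on time-major projections; the attention output is transposed back
# to batch-major after the output projection.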
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=mx.gpu(0),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=mx.gpu(0),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_encdec():
for dtype in ['float16', 'float32']:
check_multihead_attention_encdec(dtype=dtype)
if __name__ == '__main__':
import nose
nose.runmodule()
|
upload_wpt_results_test.py
|
# Copyright 2018 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import cgi
import json
import os
import shutil
import subprocess
import tempfile
import threading
import unittest
import zlib
here = os.path.dirname(os.path.abspath(__file__))
upload_bin = os.path.sep.join(
[here, '..', 'src', 'scripts', 'upload-wpt-results.py']
)
default_run_info = {
u'product': u'firefox',
u'bits': 64,
u'has_sandbox': True,
u'stylo': False,
u'e10s': True,
u'headless': False,
u'os_version': u'16.04',
u'linux_distro': u'Ubuntu',
u'browser_version': u'61.0a1',
u'version': u'Ubuntu 16.04',
u'debug': False,
u'os': u'linux',
u'processor': u'x86_64',
u'revision': u'503a4f322c662853f7956700830b37cf3f84390e'
}
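# default_run_info stands in for the run_info metadata that a wptrunner run
# records (product, browser_version, os, revision, ...). Each results chunk
# below copies it, and the consolidated report built by upload-wpt-results.py
# is expected to carry the same run_info (see the assertReport calls).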
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def log_message(*argv):
pass
def do_POST(self):
body_length = int(self.headers['Content-Length'])
content_type = cgi.parse_header(self.headers['Content-Type'])
if content_type[0] == 'multipart/form-data':
body = cgi.parse_multipart(self.rfile, content_type[1])
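# The uploaded result file is gzip-compressed; passing
# zlib.MAX_WBITS | 16 as wbits tells zlib to expect a gzip header.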
body['result_file'] = zlib.decompress(
body['result_file'][0], zlib.MAX_WBITS | 16
)
else:
body = str(self.rfile.read(body_length))
self.server.requests.append({
'headers': self.headers,
'payload': body
})
self.send_response(self.server.status_code)
self.send_header('Content-type', 'text/html')
self.end_headers()
def make_results():
return {
'1_of_2.json': {
'time_start': 1,
'time_end': 1,
'run_info': dict(default_run_info),
'results': [
{
'test': '/js/bitwise-or.html',
'status': 'OK',
'subtests': []
},
{
'test': '/js/bitwise-and.html',
'status': 'OK',
'subtests': [
{'status': 'FAIL', 'message': 'bad', 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'}
]
}
]
},
'2_of_2.json': {
'time_start': 1,
'time_end': 1,
'run_info': dict(default_run_info),
'results': [
{
'test': '/js/bitwise-or-2.html',
'status': 'OK',
'subtests': []
}
]
}
}
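# make_results builds two raw-results chunks (presumably mirroring a chunked
# wptrunner run); the tests below expect upload-wpt-results.py to merge them
# into a single report whose results list is the union of both chunks.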
class TestUploadWptResults(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.server = None
def tearDown(self):
try:
shutil.rmtree(self.temp_dir)
except OSError:
pass
if self.server:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
def upload(self, product, browser_channel, browser_version, os_name,
os_version, results_dir, results, port, override_platform,
total_chunks, git_branch, no_timestamps=False):
for filename in results:
with open(os.path.join(results_dir, filename), 'w') as handle:
json.dump(results[filename], handle)
cmd = [
upload_bin, '--raw-results-directory', results_dir,
'--product', product,
'--browser-channel', browser_channel,
'--browser-version', browser_version,
'--os', os_name,
'--os-version', os_version,
'--url', 'http://localhost:%s' % port,
'--user-name', 'fake-name',
'--secret', 'fake-secret',
'--override-platform', override_platform,
'--total-chunks', str(total_chunks),
'--git-branch', git_branch
]
if no_timestamps:
cmd.append('--no-timestamps')
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return (proc.returncode, stdout, stderr)
def assertBasicAuth(self, authorization, name, password):
parts = authorization.split(' ')
self.assertEqual(len(parts), 2)
self.assertEqual(parts[0], 'Basic')
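# Python 2: str.decode('base64') base64-decodes the credentials, equivalent
# to base64.b64decode.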
self.assertEqual(parts[1].decode('base64'), '%s:%s' % (name, password))
def assertReport(self, json_data, expected_data):
expected_results = expected_data['results']
expected_metadata = dict(expected_data)
del expected_metadata['results']
actual_data = json.loads(json_data)
actual_results = actual_data['results']
actual_metadata = dict(actual_data)
del actual_metadata['results']
self.assertEqual(actual_metadata, expected_metadata)
self.assertItemsEqual(actual_results, expected_results)
def start_server(self, port):
self.server = BaseHTTPServer.HTTPServer(('', port), Handler)
self.server.status_code = 201
self.server.requests = []
def target(server):
server.serve_forever()
self.server_thread = threading.Thread(
target=target, args=(self.server,)
)
self.server_thread.start()
def test_basic(self):
self.start_server(9801)
returncode, stdout, stderr = self.upload('firefox',
'stable',
'2.0',
'linux',
'4.0',
self.temp_dir,
make_results(),
9801,
override_platform='false',
total_chunks=2,
git_branch='master')
self.assertEqual(returncode, 0, stderr)
requests = self.server.requests
self.assertEqual(len(requests), 1)
self.assertBasicAuth(
requests[0]['headers']['Authorization'], 'fake-name', 'fake-secret'
)
self.assertItemsEqual(
requests[0]['payload']['labels'][0].split(','),
['stable', 'master']
)
self.assertReport(requests[0]['payload']['result_file'], {
u'time_start': 1,
u'time_end': 1,
u'run_info': default_run_info,
u'results': [
{
u'test': u'/js/bitwise-or-2.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-or.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-and.html',
u'status': u'OK',
u'subtests': [
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'first'
},
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'second'
}
]
},
]
})
def test_alternate_branch(self):
self.start_server(9801)
returncode, stdout, stderr = self.upload('firefox',
'stable',
'2.0',
'linux',
'4.0',
self.temp_dir,
make_results(),
9801,
override_platform='false',
total_chunks=2,
git_branch='jelly-doughnut')
self.assertEqual(returncode, 0, stderr)
requests = self.server.requests
self.assertEqual(len(requests), 1)
self.assertBasicAuth(
requests[0]['headers']['Authorization'], 'fake-name', 'fake-secret'
)
self.assertItemsEqual(
requests[0]['payload']['labels'][0].split(','),
['stable', 'jelly-doughnut']
)
def test_consolidate_duration(self):
results = make_results()
results['1_of_2.json']['time_start'] = 50
results['1_of_2.json']['time_end'] = 400
results['2_of_2.json']['time_start'] = 10
results['2_of_2.json']['time_end'] = 300
self.start_server(9801)
returncode, stdout, stderr = self.upload('firefox',
'stable',
'2.0',
'linux',
'4.0',
self.temp_dir,
results,
9801,
override_platform='false',
total_chunks=2,
git_branch='master')
self.assertEqual(returncode, 0, stderr)
requests = self.server.requests
self.assertEqual(len(requests), 1)
self.assertBasicAuth(
requests[0]['headers']['Authorization'], 'fake-name', 'fake-secret'
)
self.assertItemsEqual(
requests[0]['payload']['labels'][0].split(','),
['stable', 'master']
)
self.assertReport(requests[0]['payload']['result_file'], {
u'time_start': 10,
u'time_end': 400,
u'run_info': default_run_info,
u'results': [
{
u'test': u'/js/bitwise-or-2.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-or.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-and.html',
u'status': u'OK',
u'subtests': [
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'first'
},
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'second'
}
]
},
]
})
def test_insert_platform(self):
self.maxDiff = None
self.start_server(9801)
returncode, stdout, stderr = self.upload('chrome',
'stable',
'66.0',
'windows',
'95',
self.temp_dir,
make_results(),
9801,
override_platform='true',
total_chunks=2,
git_branch='master')
self.assertEqual(returncode, 0, stderr)
expected_run_info = dict(default_run_info)
expected_run_info[u'product'] = u'chrome'
expected_run_info[u'browser_version'] = u'66.0'
expected_run_info[u'os'] = u'windows'
expected_run_info[u'os_version'] = u'95'
requests = self.server.requests
self.assertEqual(len(requests), 1)
self.assertBasicAuth(
requests[0]['headers']['Authorization'], 'fake-name', 'fake-secret'
)
self.assertItemsEqual(
requests[0]['payload']['labels'][0].split(','),
['stable', 'master']
)
self.assertReport(requests[0]['payload']['result_file'], {
u'time_start': 1,
u'time_end': 1,
u'run_info': expected_run_info,
u'results': [
{
u'test': u'/js/bitwise-or-2.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-or.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-and.html',
u'status': u'OK',
u'subtests': [
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'first'
},
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'second'
}
]
},
]
})
def test_failed_request(self):
self.start_server(9804)
self.server.status_code = 500
returncode, stdout, stderr = self.upload('chrome',
'stable',
'4.3.2',
'linux',
'4.0',
self.temp_dir,
make_results(),
port=9804,
override_platform='false',
total_chunks=2,
git_branch='master')
self.assertNotEqual(returncode, 0, stdout)
self.assertEqual(len(self.server.requests), 1)
def test_no_server(self):
returncode, stdout, stderr = self.upload('chrome',
'stable',
'4.3.2',
'linux',
'4.0',
self.temp_dir,
make_results(),
port=9802,
override_platform='false',
total_chunks=2,
git_branch='master')
self.assertNotEqual(returncode, 0, stdout)
def test_missing_results(self):
self.start_server(9802)
partial_results = make_results()
del partial_results['1_of_2.json']
returncode, stdout, stderr = self.upload('firefox',
'stable',
'1.0.1',
'linux',
'4.0',
self.temp_dir,
partial_results,
total_chunks=2,
override_platform='false',
port=9802,
git_branch='master')
self.assertNotEqual(returncode, 0, stdout)
self.assertEqual(len(self.server.requests), 0)
def test_missing_timestamps(self):
self.start_server(9802)
results = make_results()
del results['1_of_2.json']['time_start']
returncode, stdout, stderr = self.upload('firefox',
'stable',
'1.0.1',
'linux',
'4.0',
self.temp_dir,
results,
total_chunks=2,
override_platform='false',
port=9802,
git_branch='master')
self.assertNotEqual(returncode, 0, stdout)
self.assertEqual(len(self.server.requests), 0)
def test_no_timestamps(self):
self.start_server(9802)
results = make_results()
del results['1_of_2.json']['time_start']
del results['1_of_2.json']['time_end']
del results['2_of_2.json']['time_start']
del results['2_of_2.json']['time_end']
returncode, stdout, stderr = self.upload('firefox',
'stable',
'1.0.1',
'linux',
'4.0',
self.temp_dir,
results,
total_chunks=2,
override_platform='false',
port=9802,
git_branch='master',
no_timestamps=True)
self.assertEqual(returncode, 0, stderr)
requests = self.server.requests
self.assertEqual(len(requests), 1)
self.assertBasicAuth(
requests[0]['headers']['Authorization'], 'fake-name', 'fake-secret'
)
self.assertItemsEqual(
requests[0]['payload']['labels'][0].split(','),
['stable', 'master']
)
self.assertReport(requests[0]['payload']['result_file'], {
u'run_info': default_run_info,
u'results': [
{
u'test': u'/js/bitwise-or-2.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-or.html',
u'status': u'OK',
u'subtests': []
},
{
u'test': u'/js/bitwise-and.html',
u'status': u'OK',
u'subtests': [
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'first'
},
{
u'status': u'FAIL',
u'message': u'bad',
u'name': u'second'
}
]
},
]
})
if __name__ == '__main__':
unittest.main()
|
trainer.py
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Trainer.
To run locally:
.. code-block:: bash
$ bazel build -c opt //lingvo:trainer
$ bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=sync --logdir=/tmp/lenet5 \
--run_locally=cpu
To use GPU, add `--config=cuda` to build command and set `--run_locally=gpu`.
"""
import os
import re
import sys
import threading
import time
from lingvo import base_trial
from lingvo import datasets
from lingvo import executor
from lingvo import model_imports
from lingvo import model_registry
from lingvo import trainer_impl
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import inference_graph_exporter
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.core import summary_utils
from lingvo.core import tpu_embedding_layers
import numpy as np
from lingvo import base_runner
from google.protobuf import text_format
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop as tpu_training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint:enable=g-direct-tensorflow-import
tf.flags.DEFINE_string(
'model', None, 'Name of the model class to train. '
'Must be a model defined in the model_registry.')
tf.flags.DEFINE_string(
'model_task_name', '', 'For multitask models: '
'select task to train/evaluate/decode. '
'Empty means to sample a task (training only).')
tf.flags.DEFINE_string('logdir', '', 'Log directory.')
tf.flags.DEFINE_bool(
'interactive', False,
'If True, enter interactive IPython for the controller job.')
tf.flags.DEFINE_string(
'run_locally', '',
'Can be empty, cpu, or gpu. If not empty, ignores cluster configuration '
'flags and runs controller and trainer in a single local process.')
tf.flags.DEFINE_string('tf_master', '', 'TF runtime.')
tf.flags.DEFINE_string(
'cluster_spec', '', 'A tf.train.ClusterSpec to override the master. '
'The dict is specified as: job=host1:port1,host2:port2,'
'host3:port3@job2=host3:port4,...')
tf.flags.DEFINE_string(
'mode', 'async', 'How this trainer binary is used. '
'async: used in an async training setup; '
'sync: used in a sync training setup; '
'shell: an interactive shell for development; '
'inspect_evaler: print evaler dataset names; '
'inspect_decoder: print decoder dataset names; '
'write_inference_graph: write inference graphs to logdir.')
tf.flags.DEFINE_string('job', '', 'trainer/controller/eval, etc.')
tf.flags.DEFINE_integer('task', 0, 'Task id within the job.')
tf.flags.DEFINE_string('controller_job', '/job:controller', 'Job name.')
tf.flags.DEFINE_integer('controller_gpus', 0, 'Number of controller GPUs.')
tf.flags.DEFINE_string('worker_job', '/job:trainer', 'Job name.')
tf.flags.DEFINE_list('additional_worker_jobs', [],
'Additional worker job names.')
tf.flags.DEFINE_integer('worker_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('worker_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_integer('worker_tpus', 0, 'Number of tpus to use per replica.')
tf.flags.DEFINE_integer('worker_num_tpu_hosts', 0, 'Number of tpu hosts.')
tf.flags.DEFINE_integer('worker_split_size', 1,
'Number of devices for one split.')
tf.flags.DEFINE_string('ps_job', '/job:ps', 'Job name')
tf.flags.DEFINE_integer('ps_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('ps_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('input_job', '/job:input', 'Job name')
tf.flags.DEFINE_integer('input_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_string(
'input_targets', '', 'Target network addresses for the '
'input job. E.g., a single ip:port, or a list of '
'comma-separated grpc://ip:port, etc.')
tf.flags.DEFINE_string('evaler_job', '/job:evaler', 'Job name')
tf.flags.DEFINE_integer('evaler_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('evaler_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('decoder_job', '/job:decoder', 'Job name')
tf.flags.DEFINE_integer('decoder_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('decoder_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('tf_data_service_address', '',
'The address of the tf.data service.')
tf.flags.DEFINE_string(
'inference_graph_filename', None,
'Output inference graph filename. If unspecified, output two inference '
'graphs, one for CPU and one for TPU using the default settings.')
tf.flags.DEFINE_string(
'inference_graph_device', None,
'Type of device the output inference graph is for. This flag is applicable '
'only when FLAGS.inference_graph_filename is specified.')
tf.flags.DEFINE_integer(
'inference_graph_random_seed', None,
'Random seed to fix when exporting inference graph. '
'Not fixed when set to None.')
tf.flags.DEFINE_list(
'graph_def_filename', [],
'Output inference graph_def filenames. Defaults to CPU graph if '
'inference_graph_filename and inference_graph_device are not specified.')
tf.flags.DEFINE_string(
'inference_dataset_name', 'Test',
'Name of the dataset whose params are used when extracting the inference graph.')
tf.flags.DEFINE_bool(
'inference_gen_tpu_init_op', True,
'Whether the tpu_init_op subgraph is generated for TPU inference graph.')
tf.flags.DEFINE_bool(
'evaler_in_same_address_as_controller', False,
'Whether or not evaler is in the same address space as '
'controller. This flag is meant for unit tests only.')
tf.flags.DEFINE_string(
'vizier_reporting_job', 'evaler',
'Job responsible for reporting metrics. This specifies a '
'job prefix, evaler will match all evaler jobs, while '
'evaler_dev and decoder_dev will only match the corresponding '
'jobs that are on the dev set.')
tf.flags.DEFINE_bool(
'add_summary', None,
'Whether we should output summaries. The default value "None" enables '
'summaries based on the job type.')
tf.flags.DEFINE_bool('disable_tf2', False,
'Whether to run on TensorFlow without V2 behaviors.')
@tf.flags.validator('vizier_reporting_job')
def _ValidateVizierReportingJob(value):
if value in ['evaler', 'decoder']:
return True
if value.startswith('evaler_') or value.startswith('decoder_'):
return True
tf.logging.info('vizier_reporting_job should usually start with evaler or '
'decoder, unless in executor/program mode. '
f'vizier_reporting_job={value}')
return True
tf.flags.DEFINE_integer(
'enqueue_max_steps', None, 'Max enqueue steps. -1 means no limit.'
' This flag should be set for unit tests only.')
tf.flags.DEFINE_integer('saver_max_to_keep', None,
'Maximum number of recent checkpoints to keep.')
tf.flags.DEFINE_float('saver_keep_checkpoint_every_n_hours', None,
'How often to keep a checkpoint.')
tf.flags.DEFINE_bool(
'checkpoint_in_trainer_tpu', False,
'Whether to enable checkpointing in TrainerTpu, allowing for '
'operation without a separate Controller task. '
'This flag also disables checkpointing from the Controller, '
'but still allows it to write summaries.')
tf.flags.DEFINE_string(
'tpu', None,
'The Cloud TPU on GCP to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url. If set, other cluster parameters (such as --cluster_spec) will be '
'configured automatically with TPUClusterResolver.')
tf.flags.DEFINE_string(
'gcp_project', None,
'Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
tf.flags.DEFINE_string(
'tpu_zone', None,
'GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the GCE zone from metadata.')
# Please consider adding model params instead of adding flags.
FLAGS = tf.flags.FLAGS
# useful for debugging.
def _StartShell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython # pylint: disable=g-import-not-at-top
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
class Controller(base_runner.BaseRunner):
"""Controller for a training cluster."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._job_name = 'controller'
assert not self._model_task_name, 'Controller needs all tasks!'
self._control_dir = os.path.join(self._logdir, 'control')
tf.io.gfile.makedirs(self._control_dir)
self._checkpoint_in_controller = True
if FLAGS.checkpoint_in_trainer_tpu:
self._checkpoint_in_controller = False
if self._early_stop:
tf.logging.warning('Controller ignoring early_stop since '
'TrainerTpu is driving training.')
self._early_stop = None
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._summary_writer = self._CreateSummaryWriter(self._control_dir)
self._model = self.params.Instantiate()
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self._summary_op = tf.summary.merge_all()
self._initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self._initialize_global_vars = tf.global_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
if self._checkpoint_in_controller:
self.checkpointer = self._CreateCheckpointer(
self._train_dir,
self._model,
init_op=self._initialize_global_vars)
self._ExportMetrics(params=self.params)
self._model_analysis, self._total_num_params = summary_utils.ModelAnalysis(
self._model)
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._control_dir,
'model_analysis.txt')
self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
self._WriteToLog(
text_format.MessageToString(self.params.ToProto(), as_utf8=True),
self._control_dir, 'params.pbtxt')
tf.io.write_graph(self._graph.as_graph_def(), self._control_dir,
'train.pbtxt')
def _CreateCheckpointer(self, train_dir, model, init_op=None):
"""Wrapper method for override purposes."""
return checkpointer.Checkpointer(train_dir, model, init_op)
def Start(self):
self._RunLoop('controller', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _Loop(self):
self._summary_writer.add_graph(self._graph)
with tf.container(self._container_id), self._GetSession() as sess:
if FLAGS.interactive:
# Into interactive debugging mode.
_StartShell(locals())
return
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
for task in self._model.tasks:
task.input.Initialize(sess)
# TODO(zhifengc): Moves these options into params.
tp = self.params.train
summary_interval_steps = tp.summary_interval_steps
save_interval_seconds = tp.save_interval_seconds
next_summary_step = 1
if not self._checkpoint_in_controller:
global_step = self._WaitUntilInit(sess)
while True:
now = time.time()
next_iteration_seconds = now + min(
10, save_interval_seconds) # 10 seconds or less
if self._checkpoint_in_controller:
# Init/restore variable if needed.
self.checkpointer.RestoreIfNeeded(sess)
global_step = sess.run(self._model.global_step)
if self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
if self._checkpoint_in_controller:
self.checkpointer.Save(sess, global_step)
sess.close()
self._DequeueThreadComplete()
return
if self._checkpoint_in_controller:
# Checkpoint if it's time.
self.checkpointer.MaybeSave(sess, global_step)
# Summary.
if self._summary_op is not None and global_step >= next_summary_step:
global_step, summary_str = sess.run(
[self._model.global_step, self._summary_op])
next_summary_step = global_step + summary_interval_steps
if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
tf.logging.info('Skipping summary: %s', summary_str)
else:
self._summary_writer.add_summary(summary_str, global_step)
tf.logging.info('Write summary @%s', global_step)
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
tf.logging.info('Write summary done: step %d', global_step)
now = time.time()
if now < next_iteration_seconds:
time.sleep(next_iteration_seconds - now)
def _SummarizeValue(self, step, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), step)
Trainer = trainer_impl.Trainer
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._job_name = 'trainer_tpu'
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
self._step_rate_tracker = summary_utils.StepRateTracker()
self._compile_op = None
self._initialized = threading.Event()
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitUntilInitTpu():
"""Wait until the model is ready."""
try:
# tpu.initialize_system() is called with None as embedding_config, as
# embedding_config is not available yet. Later in _Loop, it is called
# with the correct embedding_config. Since it cannot be called twice in
# the same graph with different embedding_config, we use a dummy_graph
# here.
dummy_graph = tf.Graph()
with dummy_graph.as_default():
tpu_initialize_system_op = tf.tpu.initialize_system(
embedding_config=None, job=None)
with self._GetSession(graph=dummy_graph) as sess:
topology = sess.run(tpu_initialize_system_op)
if self.params.train.tpu_computation_shape is None:
computation_shape = py_utils.ComputationShape(num_devices_per_split,
topology)
else:
computation_shape = self.params.train.tpu_computation_shape
assert num_devices_per_split == np.prod(computation_shape)
if self.params.train.tpu_device_order_mode is None:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism)
else:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism,
device_order_mode=self.params.train.tpu_device_order_mode)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitUntilInitTpu()
with self._graph.as_default(), tf.container(self._container_id):
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
self._CreateTF2SummaryWriter(self._train_dir)
with self._cluster, tf.device(
self._cluster.GetPlacer()), self._TF2SummaryContext():
with cluster_factory.SetImmediatelyInstantiateVariables(False):
self._model = self.params.Instantiate()
self._task = self._model.GetTask()
self._task.input.InstantiateVariables()
self._task.input.CreateTpuEnqueueOps()
self._eval_metrics = metrics.TpuEvalMetrics()
# Needed due to the AddExtraTheta() reference to global_step when
# instantiating the InputGenerator.
_ = py_utils.GetOrCreateGlobalStepVar()
self._CreateTF2SummaryOps()
self._input_stats_summary_interval_steps = (
self._task.input.params.input_stats_summary_interval_steps)
def TpuTrainStep(*args):
"""Train a shard of a batch on a single TPU core.
Args:
*args: metrics values from previous steps.
Returns:
New summed metrics values and a train_op.
"""
self._model.InstantiateVariables()
self._model.ConstructFPropBPropGraph()
tpu_embedding_collection = (
tpu_embedding_layers.TpuEmbeddingCollection.Get())
self._load_ops = tpu_embedding_collection.load_ops
self._retrieve_ops = tpu_embedding_collection.retrieve_ops
self._tpu_embedding = tpu_embedding_collection.tpu_embedding
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._task.eval_metrics, args)
outfeed_op = self._OutfeedEnqueue(self._task.per_example_tensors)
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
with tf.control_dependencies([outfeed_op]):
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._task.train_op]
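# The on-device loop below runs TpuTrainStep self._steps_per_loop times
# entirely on the TPU, threading the summed metric values through as loop
# carries, before control returns to the host's sess.run call.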
@tpu_function.on_device_training_loop
def TpuTrain():
loop_result = tpu_training_loop.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
outfeed_dequeue_op = self._OutfeedDequeueLoop(
self._task.per_example_tensors, self._steps_per_loop,
self._cluster.num_splits_per_client)
self._task.input.CreateTpuEmbeddingEnqueueOps()
def _ConstructPostTrainingLoop(train_loop_op, outfeed_dequeue_op):
"""Returns the op for tpu training with tail cpu computation."""
# Adds a tail computation that is run after the tpu_training loop
# step finishes. This allows us to run certain computations that
# act on the variables between tpu_train_loop iterations, amortizing
# the cost of those operations. The alternative of running
# tpu.outside_compilation & using tf.cond is expensive.
with tf.control_dependencies(train_loop_op):
self._model.ConstructPostTrainingLoop(outfeed_dequeue_op)
with tf.control_dependencies([self._task.post_training_loop_op]):
return ([[tf.identity(o) for o in train_loop_op],
outfeed_dequeue_op])
# Get metric result from a single replica; they are all same here.
all_tpu_ops = [t[0] for t in batch_parallel_res]
self._tpu_train_ops = (
_ConstructPostTrainingLoop(all_tpu_ops, outfeed_dequeue_op))
self._initialize_local_vars = tf.local_variables_initializer()
self._initialize_global_vars = tf.global_variables_initializer()
self._initialize_tables = tf.tables_initializer()
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer = checkpointer.Checkpointer(
self._train_dir, self._model, init_op=self._initialize_global_vars)
self.enqueue_ops = self._task.input.tpu_infeed_op
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
if self._task.input.input_data_summary_layout is not None:
self._summary_writer.add_summary(
self._task.input.input_data_summary_layout)
if FLAGS.checkpoint_in_trainer_tpu:
self._model_analysis, self._total_num_params = (
summary_utils.ModelAnalysis(self._model))
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._train_dir,
'model_analysis.txt')
# Saves the graph def.
tf.io.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
# Saves the trainer params.
self._WriteToLog(self.params.ToText(), self._train_dir,
'trainer_params.txt')
def _GetSession(self, **kwargs):
return super()._GetSession(cluster_def=self._worker_cluster_def, **kwargs)
def _OutfeedEnqueue(self, per_example_tensors):
if not per_example_tensors:
return tf.no_op()
per_example_tensors = py_utils.NestedMap(per_example_tensors)
return tpu_ops.outfeed_enqueue_tuple(per_example_tensors.Flatten())
def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
"""Process all per-example tensor outfeed data for a TPU sess.run.
Args:
per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
num_devices: number of TPU cores assigned to this process.
Returns:
A dict of per-example tensors from the latest TpuTrainStep.
"""
if not per_example_tensors:
return tf.no_op()
tensor_shapes = [
py_utils.GetShape(per_example_tensors[key])
for key in sorted(per_example_tensors)
]
tensor_types = [
tf.as_dtype(per_example_tensors[key].dtype)
for key in sorted(per_example_tensors)
]
def LoopBody(i, *input_arrays):
"""Process outfeed data for a single TpuTrainStep.
Args:
i: current loop index.
*input_arrays: One tf.TensorArray per outfeed tensor.
Returns:
i+1 (new index) plus post-write tf.TensorArray handles.
"""
# Outfeed ops execute on each JF node, so they must be located on the
# nodes.
outfeed_devices = []
device_assignment = py_utils.GetTpuDeviceAssignment()
assert device_assignment
for replica in range(device_assignment.num_replicas):
for core in range(device_assignment.num_cores_per_replica):
with tf.device(device_assignment.host_device(replica, core)):
outfeed_devices.append(
tpu_ops.outfeed_dequeue_tuple(
tensor_types,
tensor_shapes,
device_ordinal=device_assignment.tpu_ordinal(replica,
core)))
offset = i * num_devices
output_arrays = list(input_arrays)
# Each output_array holds a different per-example tensor. We get results
# for each tensor from each TPU for each TpuTrainStep call.
for j in range(len(output_arrays)):
for k in range(len(outfeed_devices)):
output_arrays[j] = output_arrays[j].write(offset + k,
outfeed_devices[k][j])
return tuple([i + 1] + output_arrays)
def LoopCond(i, *output_arrays):
del output_arrays
return i < num_loops
output_arrays = [
tf.TensorArray(
tensor_types[i],
size=num_loops * num_devices,
element_shape=tensor_shapes[i]) for i in range(len(tensor_shapes))
]
# Loop once for each time that TpuTrainStep runs.
output_arrays = tf.while_loop(
LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
concatenated_arrays = [array.concat() for array in output_arrays]
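# Each concatenated array stacks, per key, the per-core outfeed values from
# every TpuTrainStep iteration: num_loops * num_devices entries, in the order
# they were written above.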
return dict(zip(sorted(per_example_tensors), concatenated_arrays))
def _CleanUp(self):
# If there's an exception, we want _LoopEnqueue to wait until
# everything is initialized before starting up.
self._initialized.clear()
def Start(self):
# Run training.
self._RunLoop('trainer', self._Loop, cleanup_func=self._CleanUp)
def _InfeedLoop(self, sess):
tf.logging.info('_InfeedLoop start')
for _ in range(self._steps_per_loop):
sess.run(self.enqueue_ops)
def StartEnqueueOp(self, op):
# When retrieve ops for TPU embedding is present, we use _InfeedLoop above
# instead to make sure enqueue and retrieve does not happen at the same
# time as required by TPU embedding.
# We can remove this by using a tf.while_loop driven infeed op.
if self._retrieve_ops:
return
self._RunLoop(
'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
# Wait for _Loop to initialize variables first before attempting to infeed.
tf.logging.info('_LoopEnqueue waiting for _initialized...')
self._initialized.wait()
tf.logging.info('_LoopEnqueue proceeding.')
# The global step may not be initialized in this thread if the target server
# uses session state isolation (e.g. Cloud TPUs).
sess = self._GetSession()
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.RestoreGlobalStepIfNeeded(sess)
# Get the merged summary op for training-related input data stats from the
# task's input generator.
self._merged_input_data_summary_op = (
self._task.input.merged_input_data_summary_op)
return super()._LoopEnqueue(op, sess)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
self._DequeueThreadComplete()
return
with tf.container(
self._container_id), self._cluster, self._GetSession() as sess:
config_proto = (
self._tpu_embedding.config_proto
if self._tpu_embedding is not None else None)
sess.run(
tf.tpu.initialize_system(embedding_config=config_proto, job=None))
sess.run(self._initialize_tables)
sess.run(self._initialize_local_vars)
self._InitializeTF2SummaryWriter(sess)
if FLAGS.run_locally == 'tpu':
sess.run(self._initialize_global_vars)
self._SetStatusMessage('Compiling ...')
compilation_result = sess.run(self._compile_op)
comp_result_proto = tpu_compilation_result.CompilationResultProto()
comp_result_proto.ParseFromString(compilation_result)
if comp_result_proto.status_error_message:
tf.logging.fatal('Compilation failed: {}'.format(
comp_result_proto.status_error_message))
self._SetStatusMessage('Compiling done.')
if FLAGS.checkpoint_in_trainer_tpu:
# For b/134415393 -- better to initialize to a known state than
# rely on what's in the session on the trainer/TPU worker.
tf.logging.info('TrainerTpu: Force restore or initialize.')
self.checkpointer.Restore(sess, force_reinitialize=True)
global_step = sess.run(self._model.global_step)
self._initialized.set()
eval_metrics = None
if FLAGS.checkpoint_in_trainer_tpu and global_step == 0:
# Always save a ckpt at step 0.
self.checkpointer.MaybeSave(sess, global_step)
sess.run(self._load_ops)
while True:
train_steps_start = time.perf_counter()
if FLAGS.checkpoint_in_trainer_tpu:
# Init/restore variable if needed.
self.checkpointer.RestoreIfNeeded(sess)
if self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics):
# Terminate early and gracefully by setting a new max-step horizon: three
# more TPU training loops, to ensure that the enqueue ops can also
# terminate gracefully.
if self._max_steps is None:
self._max_steps = global_step + 3 * self._steps_per_loop
tf.logging.info('Early stopping at step: %d', self._max_steps)
if self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
if FLAGS.checkpoint_in_trainer_tpu:
self.checkpointer.Save(sess, global_step)
self._DequeueThreadComplete()
return
if self._retrieve_ops:
infeed_loop_thread = threading.Thread(
target=self._InfeedLoop, args=(sess,))
infeed_loop_thread.start()
tpu_train_op_start = time.perf_counter()
values, outfeeds = sess.run(self._tpu_train_ops)
tpu_train_op_secs = time.perf_counter() - tpu_train_op_start
if self._retrieve_ops:
infeed_loop_thread.join()
tf.logging.info('Retrieve params.')
sess.run(self._retrieve_ops)
tf.logging.info('Retrieve params done.')
self._eval_metrics.PackMetricsValues(values)
eval_metrics = self._eval_metrics.metrics
# Note: global_step is incremented by self._steps_per_loop by the
# previous sess.run call.
task_global_step = sess.run(self._task.global_step)
global_step = sess.run(self._model.global_step)
if not self._task.per_example_tensors:
outfeeds = {}
self._task.ProcessFPropResults(sess, task_global_step, eval_metrics,
outfeeds)
self._model.ProcessFPropResults(sess, global_step, eval_metrics,
outfeeds)
step_rate, example_rate, total_examples = (
self._step_rate_tracker.ComputeStepRate(
global_step,
eval_metrics['num_samples_in_batch'][0] * self._steps_per_loop))
self._RunTF2SummaryOps(sess)
self._SummarizeValue(global_step, 'global_step/sec', step_rate)
self._SummarizeValue(global_step, 'examples/sec', example_rate)
self._SummarizeValue(global_step, 'total_samples', total_examples)
if FLAGS.checkpoint_in_trainer_tpu:
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
global_step, step_rate, example_rate)
for key, (val, _) in sorted(eval_metrics.items()):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val)
self._SetStatusMessage(msg)
# Add model eval metrics to early stop metric history.
for metric_name, (metric_value, _) in eval_metrics.items():
self._UpdateEarlyStopMetric('train', global_step, metric_name,
metric_value)
checkpoint_write_secs = 0.0
if FLAGS.checkpoint_in_trainer_tpu:
checkpoint_write_start = time.perf_counter()
checkpoint_saved = self.checkpointer.MaybeSave(sess, global_step)
if checkpoint_saved:
checkpoint_write_secs = time.perf_counter() - checkpoint_write_start
train_steps_secs = time.perf_counter() - train_steps_start
self._ExportMetrics(
# Metrics expects python int, but global_step is numpy.int64.
global_step=int(global_step),
step_rate=step_rate,
example_rate=example_rate,
tpu_train_op_secs=tpu_train_op_secs,
checkpoint_write_secs=checkpoint_write_secs,
total_train_steps_secs=train_steps_secs,
**{k: v[0] for k, v in eval_metrics.items()})
class Evaler(base_runner.BaseRunner):
"""Evaler."""
def __init__(self, eval_type, *args, **kwargs):
super().__init__(*args, **kwargs)
self._job_name = 'evaler_' + eval_type
self._output_name = 'eval_' + eval_type
self._export = eval_type == 'train'
if not self._export:
tf.logging.info(f'Job {self._job_name} will not export the model.')
self.params.cluster.do_eval = True
self._cluster = cluster_factory.Cluster(self.params.cluster)
self._eval_dir = os.path.join(self._logdir, self._output_name)
if self._model_task_name:
self._eval_dir += '_' + str(self._model_task_name)
tf.io.gfile.makedirs(self._eval_dir)
self._eval_path = None
# Multitask params doesn't have 'task'.
if 'task' in self.params:
self._eval_path = checkpointer.GetSpecificCheckpoint(
self.params.task.eval.load_checkpoint_from)
self._should_report_metrics = self._job_name.startswith(
self._cluster.reporting_job)
with self._graph.as_default(), tf.container(self._container_id):
self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
self._CreateTF2SummaryWriter(self._eval_dir)
with self._cluster, tf.device(
self._cluster.GetPlacer()), self._TF2SummaryContext():
self._model = self.params.Instantiate()
self._params = self._model.params
self._model.ConstructFPropGraph()
self._task = self._model.GetTask(self._model_task_name)
self.checkpointer = self._CreateCheckpointer(self._train_dir,
self._model)
self._CreateTF2SummaryOps()
self._summary_op = tf.summary.merge_all()
self._initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
# No queues are allowed for eval models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
self._input_stats_summary_interval_steps = (
self._task.input.params.input_stats_summary_interval_steps)
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.io.write_graph(self._graph.as_graph_def(), self._eval_dir,
'%s.pbtxt' % self._output_name)
def _CreateCheckpointer(self, train_dir, model):
"""Wrapper method for override purposes."""
return checkpointer.Checkpointer(train_dir, model)
def Start(self):
self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
"""The main loop."""
with tf.container(
self._container_id), self._cluster, self._GetSession() as sess:
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
self._InitializeTF2SummaryWriter(sess)
self._task.input.Initialize(sess)
if self._eval_path:
self._EvalOnce(sess, self._eval_path)
self._UpdateProcessedCheckpoints(self._eval_dir, self._eval_path)
elif self._task.params.eval.eval_all_checkpoints:
self._RunOnAllCheckpoints(sess, self._EvalOnce, self._eval_dir)
else:
self._RunOnLatestCheckpoints(sess, self._EvalOnce, self._eval_dir)
if self._should_report_metrics:
tf.logging.info('Reporting trial done.')
self._trial.ReportDone()
tf.logging.info('Evaluation finished.')
def EvalLatestCheckpoint(self, last_path=None):
"""Runs eval once on the latest checkpoint."""
with tf.container(
self._container_id), self._cluster, self._GetSession() as sess:
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
self._task.input.Initialize(sess)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already evaluated.')
return
self._EvalOnce(sess, path)
def EvalCheckpoint(self, ckpt_id):
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self._initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
self._task.input.Initialize(sess)
path = '{}/ckpt-{:08d}'.format(self._train_dir, ckpt_id)
self._EvalOnce(sess, path)
def _RemoveScalarSummaries(self, summaries):
proto = summary_pb2.Summary()
proto.ParseFromString(summaries)
for i, value in enumerate(proto.value):
if value.WhichOneof('value') == 'simple_value':
del proto.value[i]
return proto.SerializeToString()
def _EvalOnce(self, sess, path):
"""Runs evaluation for a batch of samples.
Args:
sess: the tf Session.
path: checkpoint path.
"""
if not FLAGS.evaler_in_same_address_as_controller:
self.checkpointer.RestoreFromPath(sess, path)
global_step = sess.run(py_utils.GetGlobalStep())
# Save any additional information to disk before evaluation.
if self._export:
self._task.Export(path)
# Check at which global step the checkpoint was saved, and decide
# whether to run an evaluation.
if global_step < self._task.params.eval.start_eval_after:
return
if self._task.input.params.resettable:
tf.logging.info('Resetting input_generator.')
self._task.input_generator.Reset(sess)
metrics_dict = {
name: metrics.AverageMetric() for name in self._task.eval_metrics
}
num_samples_metric = metrics_dict['num_samples_in_batch']
samples_per_summary = self._task.params.eval.samples_per_summary
if samples_per_summary == 0:
assert self._task.input.params.resettable
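# samples_per_summary == 0 means "evaluate the entire (resettable) dataset";
# in that case the loop below only terminates via tf.errors.OutOfRangeError.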
while samples_per_summary == 0 or (num_samples_metric.total_value <
samples_per_summary):
try:
is_first_loop = num_samples_metric.total_value == 0
# NOTE: We intentionally do not let FProp generate scalar summaries by
# default, because evaler calls FProp multiple times for each
# checkpoint. Multiple summaries at the same step is often confusing.
# Instead, models should update eval_metrics and generate aggregate
# summaries. Other types of summaries (images, audio etc.) will be
# generated for the first eval batch.
if self._summary_op is not None and is_first_loop:
ans, summaries = sess.run([self._task.eval_metrics, self._summary_op])
summaries = self._RemoveScalarSummaries(summaries)
# Add non-scalar summaries only for the first batch of data.
self._summary_writer.add_summary(summaries, global_step)
self._summary_writer.flush()
else:
ans = sess.run(self._task.eval_metrics)
for name, (value, weight) in ans.items():
metrics_dict[name].Update(value, weight)
tf.logging.info('Total examples done: %d/%d',
num_samples_metric.total_value, samples_per_summary)
except tf.errors.OutOfRangeError:
if not self._task.input.params.resettable:
raise
break
# Replace average values with total values for certain metrics.
if 'num_predictions' in metrics_dict:
metrics_dict['num_predictions'].total_weight = 1.0
if 'num_words' in metrics_dict:
metrics_dict['num_words'].total_weight = 1.0
self._RunTF2SummaryOps(sess)
summaries = {k: v.Summary(k) for k, v in metrics_dict.items()}
summaries['total_samples'] = metrics.CreateScalarSummary(
'total_samples', num_samples_metric.total_value)
# Once we have evaluated the requested number of samples, write the summaries.
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._eval_dir),
global_step,
summaries,
text_filename=os.path.join(self._eval_dir,
'score-{:08d}.txt'.format(global_step)))
# Get merged summaries for input data stats logged by the task's input
# generator and write summaries for the stats.
if self._task.input.merged_input_data_summary_op is not None:
input_stats_summary_str = sess.run(
self._task.input.merged_input_data_summary_op)
self._WriteInputDataStatSummaries(input_stats_summary_str, global_step)
if self._should_report_metrics:
tf.logging.info('Reporting eval measure for step %d.' % global_step)
self._trial.ReportEvalMeasure(global_step, metrics_dict, path)
Decoder = trainer_impl.Decoder
GetDecoderDir = trainer_impl.GetDecoderDir
def _GetClusterSpecDict():
"""Parses the cluster_spec flag and returns a dict."""
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
raise ValueError(f'Invalid job specification: {job_spec}')
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
return cluster_spec_dict
class RunnerManager:
"""Helper class for managing runners."""
# This is a hack so these classes can be overridden with internal
# non-public implementations.
# pylint: disable=invalid-name
inference_graph_exporter = inference_graph_exporter
model_registry = model_registry
Controller = Controller
Trainer = Trainer
TrainerTpu = TrainerTpu
Evaler = Evaler
Decoder = Decoder
ExecutorTpu = executor.ExecutorTpu
# pylint: enable=invalid-name
def __init__(self, model):
self._model_name = model
def MaybeLaunchTensorFlow(self):
"""Starts TF machinery in this process."""
if FLAGS.run_locally or FLAGS.tpu:
return
tf.logging.info('Launching tensorflow.')
target = FLAGS.tf_master
if not target.startswith('localhost'):
# E.g., trainer_client is configured w/ FLAGS.tf_master pointing to
# another job. In that case, start a local server.
cluster_spec_dict = _GetClusterSpecDict()
self._tf_server = tf.distribute.Server(
tf.train.ClusterSpec(cluster_spec_dict),
job_name=FLAGS.job,
task_index=FLAGS.task)
target = self._tf_server.target
if not FLAGS.tf_master:
FLAGS.tf_master = target
with tf.Session(target).as_default():
value = (tf.constant(1.) + tf.constant(1.)).eval()
assert value == 2.0, 'Something is really wrong.'
tf.logging.info('Launched tensorflow.')
def GetExecutorParams(self):
"""Get the params needed to instantiate the ExecutorTpu.
Returns:
Tuple (dict, params):
- ps_params_dict: high_level task_name -> ProgramScheduleParams
- train_cfg: Either a SingleTaskModelParams or MultiTaskModelParams.
"""
cluster = cluster_factory.Current()
self.UpdateClusterParamsFromFlags(cluster.params, 'executor_tpu')
ps_params_dict, train_cfg = executor.GetExecutorParams(
self._model_name, cluster.params, self.model_registry)
return ps_params_dict, train_cfg
def GetParamsForDataset(self, job_name, dataset_name):
"""Returns params for job `job_name` on the dataset `dataset_name`."""
# Get the current cluster and update its params from flags.
cluster = cluster_factory.Current()
self.UpdateClusterParamsFromFlags(cluster.params, job_name)
with cluster_factory.Cluster(cluster.params):
try:
cfg = self.model_registry.GetParams(self._model_name, dataset_name)
except base_model_params.DatasetError as e:
dataset_name_retry = dataset_name.title()
tf.logging.warning(
'Exception configuring dataset %s, retrying as %s: %s',
dataset_name, dataset_name_retry, e)
cfg = self.model_registry.GetParams(self._model_name,
dataset_name_retry)
tf.logging.warning('Succeeded after retrying as %s.' %
dataset_name_retry)
cfg.cluster = cluster.params
# Updates a few params based on flags.
if FLAGS.enqueue_max_steps is not None:
cfg.train.enqueue_max_steps = FLAGS.enqueue_max_steps
if FLAGS.saver_max_to_keep is not None:
cfg.train.save_max_to_keep = FLAGS.saver_max_to_keep
if FLAGS.saver_keep_checkpoint_every_n_hours is not None:
cfg.train.save_keep_checkpoint_every_n_hours = FLAGS.saver_keep_checkpoint_every_n_hours
return cfg
def MaybeConfigRunDistributed(self):
"""If given a `FLAGS.cluster_spec`, update flags for running distributed."""
if not FLAGS.cluster_spec:
return
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = _GetClusterSpecDict()
if FLAGS.job == 'trainer_client':
FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
for job in cluster_spec_dict:
if job.startswith('decoder_'):
assert len(job_specs) == 1, 'Decoder jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.decoder_job = '/job:%s' % job
FLAGS.decoder_replicas = 1
if job.startswith('evaler_'):
assert len(job_specs) == 1, 'Evaler jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.evaler_job = '/job:%s' % job
FLAGS.evaler_replicas = 1
if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
'worker', 'executor_tpu'):
FLAGS.worker_job = '/job:worker'
FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
FLAGS.ps_job = '/job:worker'
FLAGS.ps_replicas = FLAGS.worker_replicas
if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
FLAGS.worker_job = '/job:trainer'
FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
FLAGS.ps_job = '/job:ps'
FLAGS.ps_replicas = len(cluster_spec_dict['ps'])
def MaybeConfigCloudTpu(self):
"""If given `FLAGS.tpu`, update flags for running on a Cloud TPU."""
if not FLAGS.tpu:
return
if not FLAGS.job:
FLAGS.job = 'trainer_client'
if FLAGS.job not in ('trainer_client', 'executor_tpu'):
raise ValueError('Only trainer_client and executor_tpu jobs are '
'supported on TPU.')
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
project=FLAGS.gcp_project,
zone=FLAGS.tpu_zone,
job_name=FLAGS.job)
cluster_spec_dict = cluster_resolver.cluster_spec().as_dict()
FLAGS.mode = 'sync'
FLAGS.tf_master = cluster_resolver.master()
FLAGS.worker_job = '/job:{}'.format(FLAGS.job)
FLAGS.worker_replicas = 1
FLAGS.worker_num_tpu_hosts = len(cluster_spec_dict[FLAGS.job])
FLAGS.worker_tpus = (
cluster_resolver.num_accelerators()['TPU'] * FLAGS.worker_num_tpu_hosts)
FLAGS.ps_job = FLAGS.worker_job
if FLAGS.job == 'trainer_client':
FLAGS.ps_replicas = FLAGS.worker_replicas
FLAGS.cluster_spec = ('@'.join('{}={}'.format(job, ','.join(hosts))
for job, hosts in cluster_spec_dict.items()))
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
FLAGS.checkpoint_in_trainer_tpu = True
def UpdateClusterParamsFromFlags(self, cluster, job_name):
"""Update `cluster` with a training cluster configuration from flags."""
cluster.mode = FLAGS.mode
cluster.job = job_name
cluster.task = FLAGS.task
cluster.do_eval = job_name in ['evaler', 'decoder']
cluster.logdir = FLAGS.logdir
cluster.controller.name = FLAGS.controller_job
cluster.controller.gpus_per_replica = FLAGS.controller_gpus
cluster.worker.name = FLAGS.worker_job
cluster.worker.replicas = FLAGS.worker_replicas
cluster.worker.gpus_per_replica = FLAGS.worker_gpus
cluster.worker.tpus_per_replica = FLAGS.worker_tpus
cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
cluster.worker.devices_per_split = FLAGS.worker_split_size
if FLAGS.additional_worker_jobs:
for additional_job in FLAGS.additional_worker_jobs:
cluster.worker.additional_worker_names.append(additional_job)
if FLAGS.tpu:
job_name = cluster.worker.name.replace('/job:', '', 1)
worker_hosts = _GetClusterSpecDict()[job_name]
if FLAGS.additional_worker_jobs:
for additional_job in cluster.worker.additional_worker_names:
additional_job_name = additional_job.replace('/job:', '', 1)
worker_hosts.extend(_GetClusterSpecDict()[additional_job_name])
cluster.worker.targets = ','.join(
'grpc://{}'.format(host) for host in worker_hosts)
cluster.ps.name = FLAGS.ps_job
cluster.ps.replicas = FLAGS.ps_replicas
cluster.ps.gpus_per_replica = FLAGS.ps_gpus
cluster.input.name = FLAGS.input_job
cluster.input.replicas = FLAGS.input_replicas
cluster.input.targets = FLAGS.input_targets
cluster.evaler.name = FLAGS.evaler_job
cluster.evaler.replicas = FLAGS.evaler_replicas
cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus
cluster.decoder.name = FLAGS.decoder_job
cluster.decoder.replicas = FLAGS.decoder_replicas
cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus
cluster.tf_data_service_address = FLAGS.tf_data_service_address
cluster.add_summary = FLAGS.add_summary
cluster.reporting_job = FLAGS.vizier_reporting_job
def _CreateRunner(self, job, model_task_name, logdir, tf_master, trial):
"""Create a runner."""
evaler_job_name_prefix = 'evaler_'
decoder_job_name_prefix = 'decoder_'
tf.logging.info('Job %s start', job)
common_args = (model_task_name, logdir, tf_master, trial)
if job == 'controller':
cfg = self.GetParamsForDataset('controller', 'Train')
cfg.cluster.xla_device = 'cpu'
return self.Controller(cfg, *common_args)
elif job == 'trainer':
cfg = self.GetParamsForDataset('trainer', 'Train')
return self.Trainer(cfg, *common_args)
elif job == 'trainer_client':
cfg = self.GetParamsForDataset('trainer_client', 'Train')
if py_utils.use_tpu():
cfg.cluster.xla_device = 'tpu'
return self.TrainerTpu(cfg, *common_args)
else:
return self.Trainer(cfg, *common_args)
elif job.startswith(evaler_job_name_prefix):
dataset_name = job[len(evaler_job_name_prefix):]
cfg = self.GetParamsForDataset('evaler', dataset_name)
return self.Evaler(dataset_name.lower(), cfg, *common_args)
elif job.startswith(decoder_job_name_prefix):
dataset_name = job[len(decoder_job_name_prefix):]
cfg = self.GetParamsForDataset('decoder', dataset_name)
return self.Decoder(dataset_name.lower(), cfg, *common_args)
elif job in ('ps', 'worker', 'input'):
self._tf_server.join()
elif job == 'executor_tpu':
ps_cfg_dict, train_cfg = self.GetExecutorParams()
return self.ExecutorTpu(train_cfg, ps_cfg_dict, *common_args)
else:
raise ValueError('job %s is not supported' % job)
def CreateRunners(self, jobs, logdir, trial=base_trial.NoOpTrial()):
"""Creates a list of runners based on `FLAGS.mode`.
Args:
jobs: a list of runner jobs.
logdir: the directory used for logging, usually on CNS.
trial: optional `Trial` object, used for reporting measures and early
stopping.
Returns:
A list of `.BaseRunner`, one per job in `jobs`.
"""
runners = []
is_training = 'trainer' in jobs or 'trainer_client' in jobs
for j in jobs:
tf_master = FLAGS.tf_master
# Ensure that decoder or evaler threads do not clobber variables being
# updated by trainer by forcing them to use independent sessions.
if (is_training and (j.startswith('decoder') or j.startswith('evaler'))):
tf_master = ''
runner = self._CreateRunner(j, FLAGS.model_task_name, logdir, tf_master,
trial)
runners.append(runner)
return runners
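  # Hedged note (not part of the original file): the `jobs` argument to
  # CreateRunners() above comes from splitting FLAGS.job on commas (see Start()
  # below), e.g. ['controller', 'trainer_client'] for local sync training or
  # ['evaler_dev', 'decoder_test'], where the suffix after 'evaler_'/'decoder_'
  # names the dataset handled by _CreateRunner(). The dataset names here are
  # illustrative only.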
def StartRunners(self, runners):
"""Runs `runners` in parallel threads.
Returns when all of them finish.
Args:
runners: a list of `.BaseRunner`.
Returns:
None.
"""
threads = []
tf.logging.info('Starting runners')
for runner in runners:
runner_class_name = str(runner)
t = threading.Thread(target=runner.Start, name=runner_class_name)
t.daemon = True
t.start()
threads.append(t)
if runner.enqueue_ops:
tf.logging.info('Total num runner.enqueue_ops: %d',
len(runner.enqueue_ops))
for i, enqueue_op in enumerate(runner.enqueue_ops):
def StartEnqueue(runner, op):
tf.logging.info('Starting enqueue op %s', op.name)
return lambda: runner.StartEnqueueOp(op)
enqueue_name = '%s-enqueue-%d' % (runner_class_name, i)
tq = threading.Thread(
target=StartEnqueue(runner, enqueue_op), name=enqueue_name)
tq.start()
threads.append(tq)
tf.logging.info('Waiting for runners to finish...')
for t in threads:
tf.logging.info('Waiting for thread to finish: %s' % t.name)
while True:
t.join(1)
if not t.is_alive():
break
tf.logging.info('All runners done.')
def RunTrial(self, job, logdir, trial):
"""A wrapper function for running a trial."""
# Run each job in separate process/task
# TODO(rpang): add support for running evaler_test and decoder.
self.StartRunners(self.CreateRunners([job], logdir, trial))
def MaybeConfigRunLocally(self):
"""Update flags if configured to run locally."""
if not FLAGS.run_locally:
# Do nothing
return
FLAGS.tf_master = tf.distribute.Server.create_local_server().target
if not FLAGS.mode:
FLAGS.mode = 'sync'
if not FLAGS.job:
if FLAGS.run_locally == 'tpu':
FLAGS.job = 'trainer_client'
elif FLAGS.mode == 'async':
FLAGS.job = 'controller,trainer'
else:
FLAGS.job = 'controller,trainer_client'
FLAGS.task = 0
local_job = '/job:localhost'
FLAGS.controller_job = local_job
FLAGS.worker_job = local_job
FLAGS.worker_replicas = 1
if FLAGS.run_locally == 'gpu':
if not FLAGS.worker_gpus:
FLAGS.worker_gpus = 1
else:
FLAGS.worker_gpus = 0
if FLAGS.run_locally == 'tpu':
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
else:
FLAGS.worker_tpus = 0
if not FLAGS.worker_split_size:
FLAGS.worker_split_size = 1
FLAGS.ps_job = local_job
FLAGS.ps_replicas = 1
FLAGS.ps_gpus = 0
FLAGS.input_job = local_job
FLAGS.input_replicas = 0
FLAGS.evaler_job = local_job
FLAGS.evaler_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.evaler_gpus = 1
else:
FLAGS.evaler_gpus = 0
FLAGS.decoder_job = local_job
FLAGS.decoder_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.decoder_gpus = 1
else:
FLAGS.decoder_gpus = 0
def InspectParams(self):
r"""Print out all the params.
An example to run this mode:
bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=inspect_params --logdir=/tmp/lenet5 \
--run_locally=cpu
"""
FLAGS.mode = 'sync'
cls = self.model_registry.GetClass(self._model_name)
tf.io.gfile.makedirs(FLAGS.logdir)
for dataset in datasets.GetDatasets(cls):
p = self.GetParamsForDataset('controller', dataset)
outf = os.path.join(FLAGS.logdir, dataset.lower() + '-params.txt')
tf.logging.info('Write all params for {} to {}'.format(dataset, outf))
with tf.io.gfile.GFile(outf, 'w') as f:
f.write(p.ToText())
def InspectModel(self):
"""Prints out model analysis for the model."""
FLAGS.mode = 'sync'
p = self.GetParamsForDataset('controller', 'Train')
c = cluster_factory.Cluster(p.cluster)
with tf.Graph().as_default(), c, tf.device(c.GetPlacer()):
analysis, _ = summary_utils.ModelAnalysis(p.Instantiate())
print(analysis)
def InspectDatasets(self):
"""Prints out datasets configured for the model."""
cls = self.model_registry.GetClass(self._model_name)
print(','.join([dataset.lower() for dataset in datasets.GetDatasets(cls)]))
def InspectDecoder(self):
"""Prints out datasets configured for the decoder."""
cls = self.model_registry.GetClass(self._model_name)
params = cls()
has_decoder = False
if issubclass(cls, base_model_params.SingleTaskModelParams):
has_decoder = params.Task(
).cls.CreateDecoderMetrics != base_model.BaseTask.CreateDecoderMetrics
else:
for _, task_param in params.Model().task_params.IterParams():
has_decoder |= (
task_param.cls.CreateDecoderMetrics !=
base_model.BaseTask.CreateDecoderMetrics)
if has_decoder:
# We assume that the proper decoder is implemented.
self.InspectDatasets()
else:
print('')
def SetModelName(self, model_name):
"""Sets the model name."""
self._model_name = model_name
def WriteInferenceGraph(self):
"""Generates the inference graphs for a given model."""
inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
tf.io.gfile.makedirs(inference_graph_dir)
tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)
cfg = self.model_registry.GetParams(self._model_name,
FLAGS.inference_dataset_name)
task_names = [FLAGS.model_task_name]
if (issubclass(cfg.cls, base_model.MultiTaskModel) and
not FLAGS.model_task_name):
task_names = base_model.MultiTaskModel.TaskNames(cfg)
inference_graph_proto = None
if FLAGS.inference_graph_filename:
# Custom inference graph.
for task_name in task_names:
filename_prefix = FLAGS.inference_graph_filename
if task_name:
filename_prefix = '%s_inference' % task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
device = ''
var_options = None
if FLAGS.inference_graph_device == 'tpu':
device = 'tpu'
var_options = 'ON_DEVICE'
device_options = inference_graph_exporter.InferenceDeviceOptions(
device=device,
retain_device_placement=False,
var_options=var_options,
gen_init_op=FLAGS.inference_gen_tpu_init_op,
dtype_override=None,
fprop_dtype_override=None)
inference_graph_proto = (
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=task_name,
device_options=device_options,
export_path=filename_prefix + '.pbtxt',
random_seed=FLAGS.inference_graph_random_seed))
else:
for task_name in task_names:
filename_prefix = 'inference'
if task_name:
filename_prefix = '%s_inference' % task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
# Standard inference graph.
try:
inference_graph_proto = (
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=task_name,
export_path=filename_prefix + '.pbtxt',
random_seed=FLAGS.inference_graph_random_seed))
except NotImplementedError as e:
tf.logging.error('Cannot write inference graph: %s', e)
# TPU inference graph. Not all models support it so fail silently.
try:
device_options = self.inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=False,
var_options='ON_DEVICE',
gen_init_op=FLAGS.inference_gen_tpu_init_op,
dtype_override=None,
fprop_dtype_override=None)
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=task_name,
device_options=device_options,
export_path=filename_prefix + '_tpu.pbtxt',
random_seed=FLAGS.inference_graph_random_seed)
except Exception as e: # pylint: disable=broad-except
tf.logging.error('Error exporting TPU inference graph: %s' % e)
if FLAGS.graph_def_filename and inference_graph_proto:
for graph_def_filename in FLAGS.graph_def_filename:
tf.logging.info('Writing graphdef: %s', graph_def_filename)
dir_path = os.path.dirname(graph_def_filename)
if (not tf.io.gfile.exists(dir_path) or
not tf.io.gfile.isdir(dir_path)):
tf.io.gfile.makedirs(dir_path)
with tf.io.gfile.GFile(graph_def_filename, 'w') as f:
f.write(text_format.MessageToString(inference_graph_proto.graph_def))
def RunEvalerOnce(self):
"""Run once evaler."""
m = re.match(r'evaler_once_([^_@]+)@(\d+)', FLAGS.job)
dataset_name, ckpt_id = m.group(1), int(m.group(2))
cfg = self.GetParamsForDataset('evaler', dataset_name)
evaler = self.Evaler(dataset_name.lower(), cfg, FLAGS.model_task_name,
FLAGS.logdir, FLAGS.tf_master)
evaler.EvalCheckpoint(ckpt_id)
def Start(self):
"""Start the process."""
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('tf_api_version: %s', tf.summarize_tf2_status())
if FLAGS.mode == 'inspect_params':
self.InspectParams()
return
if FLAGS.mode == 'inspect_model':
self.InspectModel()
return
if FLAGS.mode == 'inspect_evaler':
self.InspectDatasets()
return
if FLAGS.mode == 'inspect_decoder':
self.InspectDecoder()
return
if FLAGS.mode == 'write_inference_graph':
self.WriteInferenceGraph()
return
if FLAGS.mode == 'shell':
_StartShell(locals())
return
assert FLAGS.mode in ['sync', 'async']
self.MaybeConfigRunLocally()
self.MaybeConfigRunDistributed()
self.MaybeConfigCloudTpu()
self.MaybeLaunchTensorFlow()
if FLAGS.job.startswith('evaler_once_'):
# E.g., trainer --model=foo.bar.Model --logdir=...
# --run_locally=cpu --mode=sync --job=evaler_once_test@65200
self.RunEvalerOnce()
return
self.StartRunners(self.CreateRunners(FLAGS.job.split(','), FLAGS.logdir))
def main(unused_argv):
RunnerManager(FLAGS.model).Start()
if __name__ == '__main__':
tf.disable_eager_execution()
tf.flags.mark_flag_as_required('model')
FLAGS(sys.argv, known_only=True)
if FLAGS.disable_tf2:
tf.disable_v2_behavior()
model_imports.ImportParams(FLAGS.model)
FLAGS.unparse_flags()
tf.app.run(main)
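# --- Hedged usage note (not part of the original file) ---
# Example local run, mirroring the invocation shown in the InspectParams()
# docstring above:
#   trainer --model=image.mnist.LeNet5 --run_locally=cpu --mode=sync \
#     --logdir=/tmp/lenet5 --logtostderr
# Distributed runs instead pass --cluster_spec, --job and --task (see
# MaybeConfigRunDistributed() and _GetClusterSpecDict()).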
|
authorization_code.py
|
#!/usr/bin/python3
# coding: utf-8
from local_server import runHTTPServer, acode
import threading
import requests
from pprint import pprint
import json
#from manager import uuid
authority_url = 'http://127.0.0.1:5000/oauth/authorize'
token_url = 'http://127.0.0.1:5000/oauth/token'
api_port=5000
auth_url='http://127.0.0.1:5000/oauth/authorize'
server_ip='127.0.0.1'
server_port=3000
#redirect_url='http://127.0.0.1:3000'
redirect_uri='http%3A%2F%2F127.0.0.1%3A'+str(server_port)
client_id='9a195ac5-1a34-4bdd-837e-13f80bc5364d'
print('Check client_id (../manage_py --id=1 uuid):')
code_url=auth_url+('?redirect_uri=%s&client_id=%s' % (redirect_uri,client_id))+\
'&response_type=code&state=state_test&response_mode=query'
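# Hedged overview (not part of the original file): this script exercises a
# standard OAuth2 authorization-code flow against the local API above:
#   1. start the redirect catcher from local_server on 127.0.0.1:3000,
#   2. log in with the demo user to obtain a session cookie,
#   3. GET code_url so the server redirects back with ?code=... (stored in acode),
#   4. POST the code to token_url to exchange it for an access token.
# The step numbers are descriptive only; see the code below for the exact calls.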
class TestApp:
def __init__(self):
self.config={}
self.config["client_id"]='9a195ac5-1a34-4bdd-837e-13f80bc5364d' #uuid(1)
self.config["scopes"]='test'
self.ip=server_ip
self.port=server_port
self.config["redirect_uri"]='http://%s:%s' % (server_ip,server_port)
def get_token(self, code):
params = {
'grant_type': 'authorization_code',
'response_type': 'token',
'code': code,
'client_id': self.config["client_id"],
'scopes': self.config["scopes"],
'redirect_uri': self.config["redirect_uri"]
}
try:
resp = requests.post(token_url, params)
respdata = json.loads(resp.text)
if 'access_token' in respdata:
self._access_token=respdata['access_token']
return self._access_token
else:
pprint(respdata)
return None
except Exception as e:
print(e)
return None
def run_query(self, query, req_method, headers=None, req_body=None):
if not self._access_token:
            print('No token')
return
req_headers = {
'Authorization': 'Bearer ' + self._access_token,
'Accept': '*/*',
'Content-Type': 'application/json'
}
if headers:
for key in headers:
req_headers[key] = headers[key]
data = None
if req_method == "POST":
            data = requests.post(query, headers=req_headers,
                                 json=req_body).json()
if req_method == "GET":
data = requests.get(query, headers=req_headers)
if req_method == "PUT":
data = requests.put(query, data=req_body, headers=req_headers).json()
return data
def get_test(self):
test_endpoint=''
body = {
}
self.run_query(test_endpoint, "POST", None, body)
def start_httpd(ip,port):
runHTTPServer(ip,port)
def server_thread():
start_httpd(server_ip, server_port)
try:
thread_type = threading.Thread(target=server_thread)
thread_type.start()
thread_type.join(2)
demo_login = {"user": "demo", "password": "demo"}
session = requests.Session()
login = session.post('http://localhost:%d/json_login' % api_port, None, demo_login)
cookies=session.cookies.get_dict()
response=session.get(code_url, cookies=cookies)
    code = None
    try:
global acode
code=acode[0]
# pprint(response.text)
print('code=',code)
except Exception as e:
print(e)
if code:
my_app = TestApp()
token=my_app.get_token(code)
print('token=',token)
# if token:
# print(my_app.get_test())
    print('end of test')
except Exception as e:
print("Error: %s" % e)
|
tasks.py
|
import os
from threading import Thread
from invoke import run, task
from time import sleep
@task
def dropdb(context):
from palaverapi.models import database, Device, Token
Token.drop_table(True)
Device.drop_table(True)
@task
def syncdb(context):
from palaverapi.models import database, Device, Token
Device.create_table()
Token.create_table()
def configure_db():
database_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'tests.sqlite'
)
os.environ['DATABASE_URL'] = 'sqlite:///{}'.format(database_path)
run('invoke dropdb')
run('invoke syncdb')
@task
def tests(context):
configure_db()
run('pytest')
@task
def test_blueprint(context):
configure_db()
from rivr import serve
from palaverapi.views import router
thread = Thread(target=serve, args=(router,))
thread.start()
run('dredd ./apiary.apib http://localhost:8080/')
@task
def cleanup(context):
import sys
import math
from palaverapi.utils import load_apns_client, TOPIC
from palaverapi.models import Device
from apns2.errors import BadDeviceToken, Unregistered
from apns2.payload import Payload
apns_client = load_apns_client()
payload = Payload() # No alert is shown to recipient if payload is empty.
stepsize = 500
total = Device.select().count()
steps = int(math.ceil(float(total) / float(stepsize)))
removed_devices = 0
print('Currently {} devices in database.'.format(total))
for i in range(0, steps):
# Print progress percentage
frac = float(i * stepsize) / float(total)
sys.stdout.write('\r{:>6.1%}'.format(frac))
sys.stdout.flush()
devices = Device.select().limit(stepsize).offset(i * stepsize).execute()
for device in devices:
try:
                apns_client.send_notification(device.apns_token, payload, TOPIC)
except (BadDeviceToken, Unregistered) as e:
device.delete_instance(recursive=True)
removed_devices += 1
sleep(10)
print('\nDone! Removed {} devices.'.format(removed_devices))
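# --- Hedged usage note (not part of the original file) ---
# Typical invocations with the Invoke CLI, assuming this file is the project's
# tasks.py:
#   invoke syncdb     # create the Device and Token tables
#   invoke tests      # point DATABASE_URL at tests.sqlite and run pytest
#   invoke cleanup    # prune devices whose APNs tokens are no longer valid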
|
snapshotter.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urlparse
import httplib
import traceback
import datetime
import ConfigParser
import multiprocessing as mp
from common import CommonVariables
from HttpUtil import HttpUtil
from Utils import Status
from Utils import HandlerUtil
from fsfreezer import FsFreezer
class SnapshotInfoIndexerObj():
def __init__(self, index, isSuccessful, snapshotTs, errorMessage):
self.index = index
self.isSuccessful = isSuccessful
self.snapshotTs = snapshotTs
self.errorMessage = errorMessage
def __str__(self):
return 'index: ' + str(self.index) + ' isSuccessful: ' + str(self.isSuccessful) + ' snapshotTs: ' + str(self.snapshotTs) + ' errorMessage: ' + str(self.errorMessage)
class SnapshotError(object):
def __init__(self):
self.errorcode = CommonVariables.success
self.sasuri = None
def __str__(self):
return 'errorcode: ' + str(self.errorcode)
class SnapshotResult(object):
def __init__(self):
self.errors = []
def __str__(self):
error_str = ""
for error in self.errors:
error_str+=(str(error)) + "\n"
return error_str
class Snapshotter(object):
"""description of class"""
def __init__(self, logger):
self.logger = logger
self.configfile='/etc/azure/vmbackup.conf'
def snapshot(self, sasuri, sasuri_index, meta_data, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger):
temp_logger=''
error_logger=''
snapshot_error = SnapshotError()
snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
if(sasuri is None):
error_logger = error_logger + str(datetime.datetime.now()) + " Failed to do the snapshot because sasuri is none "
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
try:
sasuri_obj = urlparse.urlparse(sasuri)
if(sasuri_obj is None or sasuri_obj.hostname is None):
error_logger = error_logger + str(datetime.datetime.now()) + " Failed to parse the sasuri "
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
else:
start_time = datetime.datetime.utcnow()
body_content = ''
headers = {}
headers["Content-Length"] = '0'
if(meta_data is not None):
for meta in meta_data:
key = meta['Key']
value = meta['Value']
headers["x-ms-meta-" + key] = value
temp_logger = temp_logger + str(headers)
http_util = HttpUtil(self.logger)
sasuri_obj = urlparse.urlparse(sasuri + '&comp=snapshot')
temp_logger = temp_logger + str(datetime.datetime.now()) + ' start calling the snapshot rest api. '
# initiate http call for blob-snapshot and get http response
result, httpResp, errMsg = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers)
if(result == CommonVariables.success and httpResp != None):
# retrieve snapshot information from http response
snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri)
temp_logger = temp_logger + str(datetime.datetime.now()) + ' httpresponse_get_snapshot_info message: ' + str(message)
else:
# HttpCall failed
error_logger = error_logger + str(datetime.datetime.now()) + " snapshot HttpCallGetResponse failed "
error_logger = error_logger + str(datetime.datetime.now()) + str(errMsg)
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
end_time = datetime.datetime.utcnow()
time_taken=end_time-start_time
temp_logger = temp_logger + str(datetime.datetime.now()) + ' time taken for snapshot ' + str(time_taken)
except Exception as e:
errorMsg = " Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
error_logger = error_logger + str(datetime.datetime.now()) + errorMsg
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
temp_logger=temp_logger + str(datetime.datetime.now()) + ' snapshot ends..'
global_logger.put(temp_logger)
global_error_logger.put(error_logger)
snapshot_result_error.put(snapshot_error)
snapshot_info_indexer_queue.put(snapshot_info_indexer)
def snapshot_seq(self, sasuri, sasuri_index, meta_data):
result = None
snapshot_error = SnapshotError()
snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
if(sasuri is None):
self.logger.log("Failed to do the snapshot because sasuri is none",False,'Error')
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
try:
sasuri_obj = urlparse.urlparse(sasuri)
if(sasuri_obj is None or sasuri_obj.hostname is None):
self.logger.log("Failed to parse the sasuri",False,'Error')
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
else:
body_content = ''
headers = {}
headers["Content-Length"] = '0'
if(meta_data is not None):
for meta in meta_data:
key = meta['Key']
value = meta['Value']
headers["x-ms-meta-" + key] = value
self.logger.log(str(headers))
http_util = HttpUtil(self.logger)
sasuri_obj = urlparse.urlparse(sasuri + '&comp=snapshot')
self.logger.log("start calling the snapshot rest api")
# initiate http call for blob-snapshot and get http response
result, httpResp, errMsg = http_util.HttpCallGetResponse('PUT', sasuri_obj, body_content, headers = headers)
if(result == CommonVariables.success and httpResp != None):
# retrieve snapshot information from http response
snapshot_info_indexer, snapshot_error, message = self.httpresponse_get_snapshot_info(httpResp, sasuri_index, sasuri)
self.logger.log(' httpresponse_get_snapshot_info message: ' + str(message))
else:
# HttpCall failed
self.logger.log(" snapshot HttpCallGetResponse failed ")
self.logger.log(str(errMsg))
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
except Exception as e:
errorMsg = "Failed to do the snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.logger.log(errorMsg, False, 'Error')
snapshot_error.errorcode = CommonVariables.error
snapshot_error.sasuri = sasuri
return snapshot_error, snapshot_info_indexer
def snapshotall_parallel(self, paras, freezer, thaw_done):
self.logger.log("doing snapshotall now in parallel...")
snapshot_result = SnapshotResult()
snapshot_info_array = []
all_failed = True
exceptOccurred = False
is_inconsistent = False
thaw_done_local = thaw_done
unable_to_sleep = False
try:
mp_jobs = []
global_logger = mp.Queue()
global_error_logger = mp.Queue()
snapshot_result_error = mp.Queue()
snapshot_info_indexer_queue = mp.Queue()
time_before_snapshot_start = datetime.datetime.now()
blobs = paras.blobs
if blobs is not None:
# initialize snapshot_info_array
mp_jobs = []
blob_index = 0
for blob in blobs:
blobUri = blob.split("?")[0]
self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
mp_jobs.append(mp.Process(target=self.snapshot,args=(blob, blob_index, paras.backup_metadata, snapshot_result_error, snapshot_info_indexer_queue, global_logger, global_error_logger)))
blob_index = blob_index + 1
for job in mp_jobs:
job.start()
time_after_snapshot_start = datetime.datetime.now()
timeout = self.get_value_from_configfile('timeout')
if timeout == None:
timeout = 60
for job in mp_jobs:
job.join()
thaw_result = None
if thaw_done_local == False:
time_before_thaw = datetime.datetime.now()
thaw_result, unable_to_sleep = freezer.thaw_safe()
time_after_thaw = datetime.datetime.now()
HandlerUtil.HandlerUtility.add_to_telemetery_data("ThawTime", str(time_after_thaw-time_before_thaw))
thaw_done_local = True
self.logger.log('T:S thaw result ' + str(thaw_result))
if(thaw_result is not None and len(thaw_result.errors) > 0):
is_inconsistent = True
snapshot_result.errors.append(thaw_result.errors)
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
self.logger.log('end of snapshot process')
logging = [global_logger.get() for job in mp_jobs]
self.logger.log(str(logging))
error_logging = [global_error_logger.get() for job in mp_jobs]
self.logger.log(error_logging,False,'Error')
if not snapshot_result_error.empty():
results = [snapshot_result_error.get() for job in mp_jobs]
for result in results:
if(result.errorcode != CommonVariables.success):
snapshot_result.errors.append(result)
if not snapshot_info_indexer_queue.empty():
snapshot_info_indexers = [snapshot_info_indexer_queue.get() for job in mp_jobs]
for snapshot_info_indexer in snapshot_info_indexers:
# update snapshot_info_array element properties from snapshot_info_indexer object
self.get_snapshot_info(snapshot_info_indexer, snapshot_info_array[snapshot_info_indexer.index])
if (snapshot_info_array[snapshot_info_indexer.index].isSuccessful == True):
all_failed = False
self.logger.log("index: " + str(snapshot_info_indexer.index) + " blobSnapshotUri: " + str(snapshot_info_array[snapshot_info_indexer.index].snapshotUri))
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
else:
self.logger.log("the blobs are None")
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
except Exception as e:
errorMsg = " Unable to perform parallel snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.logger.log(errorMsg)
exceptOccurred = True
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
def snapshotall_seq(self, paras, freezer, thaw_done):
exceptOccurred = False
self.logger.log("doing snapshotall now in sequence...")
snapshot_result = SnapshotResult()
snapshot_info_array = []
all_failed = True
is_inconsistent = False
thaw_done_local = thaw_done
unable_to_sleep = False
try:
blobs = paras.blobs
if blobs is not None:
blob_index = 0
for blob in blobs:
blobUri = blob.split("?")[0]
self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.backup_metadata)
if(snapshotError.errorcode != CommonVariables.success):
snapshot_result.errors.append(snapshotError)
# update snapshot_info_array element properties from snapshot_info_indexer object
self.get_snapshot_info(snapshot_info_indexer, snapshot_info_array[blob_index])
if (snapshot_info_array[blob_index].isSuccessful == True):
all_failed = False
blob_index = blob_index + 1
thaw_result= None
if thaw_done_local== False:
time_before_thaw = datetime.datetime.now()
thaw_result, unable_to_sleep = freezer.thaw_safe()
time_after_thaw = datetime.datetime.now()
HandlerUtil.HandlerUtility.add_to_telemetery_data("ThawTime", str(time_after_thaw-time_before_thaw))
thaw_done_local = True
self.logger.log('T:S thaw result ' + str(thaw_result))
if(thaw_result is not None and len(thaw_result.errors) > 0):
snapshot_result.errors.append(thaw_result.errors)
is_inconsistent= True
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
else:
self.logger.log("the blobs are None")
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
except Exception as e:
errorMsg = " Unable to perform sequential snapshot with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.logger.log(errorMsg)
exceptOccurred = True
return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep
def get_value_from_configfile(self, key):
value = None
configfile = '/etc/azure/vmbackup.conf'
try :
if os.path.exists(configfile):
config = ConfigParser.ConfigParser()
config.read(configfile)
if config.has_option('SnapshotThread',key):
value = config.get('SnapshotThread',key)
else:
self.logger.log("Config File doesn't have the key :" + key)
except Exception as e:
errorMsg = " Unable to ed config file.key is "+ key +"with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
self.logger.log(errorMsg)
return value
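    # Hedged example (not part of the original file): get_value_from_configfile()
    # reads an INI-style /etc/azure/vmbackup.conf with a [SnapshotThread] section,
    # e.g.:
    #   [SnapshotThread]
    #   doseq = 1
    #   timeout = 60
    # The keys shown mirror the ones queried by snapshotall_parallel() above and
    # snapshotall() below; the values are illustrative only.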
def snapshotall(self, paras, freezer):
thaw_done = False
if (self.get_value_from_configfile('doseq') == '1') or (len(paras.blobs) <= 4):
snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep = self.snapshotall_seq(paras, freezer, thaw_done)
else:
snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done, unable_to_sleep = self.snapshotall_parallel(paras, freezer, thaw_done)
if exceptOccurred and thaw_done == False:
snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent,thaw_done, unable_to_sleep = self.snapshotall_seq(paras, freezer, thaw_done)
return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep
def httpresponse_get_snapshot_info(self, resp, sasuri_index, sasuri):
snapshot_error = SnapshotError()
snapshot_info_indexer = SnapshotInfoIndexerObj(sasuri_index, False, None, None)
result = CommonVariables.error_http_failure
message = ""
if(resp != None):
message = message + str(datetime.datetime.now()) + " snapshot resp status: " + str(resp.status) + " "
resp_headers = resp.getheaders()
message = message + str(datetime.datetime.now()) + " snapshot resp-header: " + str(resp_headers) + " "
if(resp.status == 200 or resp.status == 201):
result = CommonVariables.success
snapshot_info_indexer.isSuccessful = True
snapshot_info_indexer.snapshotTs = resp.getheader('x-ms-snapshot')
else:
result = resp.status
snapshot_info_indexer.errorMessage = resp.status
else:
message = message + str(datetime.datetime.now()) + " snapshot Http connection response is None" + " "
message = message + str(datetime.datetime.now()) + ' snapshot api returned: {0} '.format(result) + " "
if(result != CommonVariables.success):
snapshot_error.errorcode = result
snapshot_error.sasuri = sasuri
return snapshot_info_indexer, snapshot_error, message
def get_snapshot_info(self, snapshot_info_indexer, snapshot_info):
if (snapshot_info_indexer != None):
self.logger.log("snapshot_info_indexer: " + str(snapshot_info_indexer))
snapshot_info.isSuccessful = snapshot_info_indexer.isSuccessful
if (snapshot_info.isSuccessful == True):
snapshot_info.snapshotUri = snapshot_info.snapshotUri + "?snapshot=" + str(snapshot_info_indexer.snapshotTs)
else:
snapshot_info.snapshotUri = None
snapshot_info.errorMessage = snapshot_info_indexer.errorMessage
else:
snapshot_info.isSuccessful = False
snapshot_info.snapshotUri = None
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import StringIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
import multiprocessing
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2017, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if os.environ.has_key('PATHEXT'):
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData="%s" % WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData="%s" % Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
if "EDK_SOURCE" not in os.environ:
os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
#
# Unify case of characters on case-insensitive systems
#
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
if not os.path.exists(EcpSourceDir):
EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
elif ' ' in EcpSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
ExtraData=EcpSourceDir)
if not os.path.exists(EdkSourceDir):
if EdkSourceDir == EcpSourceDir:
EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
ExtraData=EdkSourceDir)
elif ' ' in EdkSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
ExtraData=EdkSourceDir)
if not os.path.exists(EfiSourceDir):
if EfiSourceDir == EcpSourceDir:
EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
ExtraData=EfiSourceDir)
elif ' ' in EfiSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
ExtraData=EfiSourceDir)
# check those variables on single workspace case
if not PackagesPath:
# change absolute path to relative path to WORKSPACE
if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
GlobalData.gEcpSource = EcpSourceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
# This is the entry method of the thread that reads the output of an external
# program and writes it to STDOUT/STDERR of the current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line != None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
BeginTime = time.time()
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
    # Command is used as the first argument in the following Popen() call.
    # It could be a string or a sequence. We found that if Command is a string
    # in the following Popen(), Ubuntu may fail with an error message that the
    # command is not found. So here we may need to convert Command from a
    # string to a list instance.
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure != None:
EndOfProcedure.set()
if Proc == None:
if type(Command) != type(""):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if type(Command) != type(""):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise some build units could
# be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other != None and self.BuildObject == Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = sdict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = sdict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = sdict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, in case duplicate build
_TaskQueue = sdict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
            # scheduling loop, which exits when there is no pending/ready task
            # and it has been told to do so, or when an error occurs in a
            # running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = BuildTask._PendingQueue.keys()
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo = BuildTask._ReadyQueue.keys()[0]
Bt = BuildTask._ReadyQueue.pop(Bo)
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid tense loop
time.sleep(0.01)
# avoid tense loop
time.sleep(0.01)
# wait for all running threads exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join([Th.getName() for Th in threading.enumerate()]))
# avoid tense loop
time.sleep(0.1)
except BaseException, X:
#
            # TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running method exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
    # Since the main thread cannot catch exceptions raised in other threads, we
    # have to use a threading.Event to communicate this information to the main
    # thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
    # Since the main thread cannot catch exceptions raised in other threads, we
    # have to use a static variable to communicate this message to the main
    # thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is building or has been built. And if
# true, just return the associated BuildTask object in the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
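    # Hedged usage sketch (not part of the original file): the factory and the
    # scheduler are typically driven together, roughly:
    #   ExitFlag = threading.Event()
    #   BuildTask.StartScheduler(MaxThreadNumber, ExitFlag)
    #   BuildTask.New(ModuleMakeUnit(Ma, Target))   # queue one module build
    #   ExitFlag.set()                              # no more tasks will arrive
    #   BuildTask.WaitForComplete()
    # Ma, MaxThreadNumber and Target are placeholders for a ModuleAutoGen object,
    # the maximum thread count and a build target.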
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency == None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string contains the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
# indicate that a thread is available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildThread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildThread.setName("build thread")
self.BuildThread.setDaemon(False)
self.BuildThread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
# @param BaseName The base name of the image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
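# Round the image size up to the next 4 KB (0x1000) page boundary; a full extra
# page is added even when the size is already page aligned.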
self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
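# Typical usage (illustrative sketch): construct Build(Target, WorkspaceDir, BuildOptions)
# with options parsed by MyOptionParser(), then call its Launch() method.
#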
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse the platform, modules
# and packages, and then establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
if self.SkuId:
GlobalData.gSKUID_CMD = self.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
self.AutoGenTime = 0
self.MakeTime = 0
self.GenFdsTime = 0
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd
# Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
GlobalData.gUseHashCache = BuildOptions.UseHashCache
GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
if GlobalData.gBinCacheSource:
BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
if not os.path.isabs(BinCacheSource):
BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
GlobalData.gBinCacheSource = BinCacheSource
else:
if GlobalData.gBinCacheSource != None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
if GlobalData.gBinCacheDest:
BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
if not os.path.isabs(BinCacheDest):
BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
GlobalData.gBinCacheDest = BinCacheDest
else:
if GlobalData.gBinCacheDest != None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
if self.ConfDirectory:
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
else:
# Get the standard WORKSPACE/Conf, using the absolute path to WORKSPACE/Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
if BuildOptions.DisableCache:
self.Db = WorkspaceDatabase(":memory:")
else:
self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.HashSkipModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory,'.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
# print a dot character while doing time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
# WORKSPACE env has been converted before. Print in the same path style as the WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
# Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile) == True:
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList == None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append("MSFT")
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
if self.ThreadNumber == None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
try:
self.ThreadNumber = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
self.ThreadNumber = 1
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive matching for values from the command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
# create metafile database
if not self.Db_Flag:
self.Db.InitDatabase()
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines.keys():
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db.InitDatabase()
self.Db_Flag = True
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines.keys():
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
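## Compose the pass-through command line options string for the prebuild/postbuild scripts
#
# Only options that were not explicitly given on the command line are appended.
# Example of a resulting string (illustrative): ' -b DEBUG -a X64 -t GCC5 --conf=Conf all'
#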
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p ' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
# The purpose of the .PrebuildEnv file is to capture environment variable settings made by the prebuild
# script and preserve them for the rest of the main build step. Because the child process environment
# evaporates as soon as the process exits, we cannot retrieve it during the build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory,'.cache','.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# wait for the program to exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0:
EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process was not successful!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = itertools.imap(lambda l: l.split('=',1), envs)
envs = itertools.ifilter(lambda l: len(l) == 2, envs)
envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# wait for the program to exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0:
EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process was not successful!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
# Create AutoGen code and a makefile for a module or platform, and then launch
# the "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject == None:
return False
# skip file generation for the cleanxxx targets and for the run and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate the top/platform makefile when cleaning, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand == None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
# Create AutoGen code and a makefile for a module or platform, and then launch
# the "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject == None:
return False
# skip file generation for the cleanxxx targets and for the run and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate the top/platform makefile when cleaning, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand == None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
InfFileNameList = ModuleList.keys()
#InfFileNameList.sort()
for InfFile in InfFileNameList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## For SMM modules in SMRAM, the SMRAM is allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
# Collect function addresses from the map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set at link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
#
# Get the real entry point address for IPF image.
#
ModuleInfo.Image.EntryPoint = RelativeAddress
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
# Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
# Add function addresses
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# For SMM modules in SMRAM, the SMRAM is allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict.keys():
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
# skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid != None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write('%s' % (Line))
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid != None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
# reserve 4K in SMRAM so that SMM module addresses do not start from 0.
SmmSize = 0x1000
IsIpfPlatform = False
if 'IPF' in self.ArchList:
IsIpfPlatform = True
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in ['PEI_CORE', 'PEIM', 'COMBINED_PEIM_DRIVER', 'PIC_PEIM', 'RELOCATABLE_PEIM', 'DXE_CORE']:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in ['BS_DRIVER', 'DXE_DRIVER', 'UEFI_DRIVER']:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['DXE_RUNTIME_DRIVER', 'RT_DRIVER', 'DXE_SAL_DRIVER', 'SAL_RT_DRIVER']:
RtModuleList[Module.MetaFile] = ImageInfo
# IPF runtime drivers need 2-page (0x2000) alignment.
if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['SMM_CORE', 'DXE_SMM_DRIVER', 'MM_STANDALONE', 'MM_CORE_STANDALONE']:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == 'DXE_SMM_DRIVER':
PiSpecVersion = '0x00000000'
if 'PI_SPECIFICATION_VERSION' in Module.Module.Specification:
PiSpecVersion = Module.Module.Specification['PI_SPECIFICATION_VERSION']
# for PI specification < PI 1.1, a DXE_SMM_DRIVER also runs as a boot-time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# The EFI image is the final target.
# Check whether the EFI image contains patchable FixAddress related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
# Keep IPF runtime drivers at 2-page alignment.
if IsIpfPlatform:
ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
RtSize = RtSize + ReservedRuntimeMemorySize
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize / 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize / 0x1000))
MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize / 0x1000))
MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize / 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize / 0x1000))
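# Fixed-address layout (from TopMemoryAddress downward): runtime drivers at the top,
# then boot-time drivers below them, then PEI modules; SMM modules are placed upward
# from offset 0x1000 in SMRAM.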
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
MapBuffer.write('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
# Compose the map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
MapBuffer.close()
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the configured fixed load address is at or above 4G for 32-bit images.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
self.Progress.Stop("done!")
MaList = []
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None: continue
MaList.append(Ma)
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
# Do not run AutoGen for the targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if not self.SkipAutoGen or self.Target == 'genmake':
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
self.BuildModules.append(Ma)
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break the build if any build thread has an error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
# In case there's an interruption, we need a full version of the makefile for the platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContiue = time.time()
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
self.MakeTime += int(round((time.time() - MakeContiue)))
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" % \
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the configured fixed load address is at or above 4G for 32-bit images.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
GenFdsStart = time.time()
self._Build("fds", Wa)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
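## Build a dictionary mapping (INF file, Arch) to the list of FFS generation commands
# returned by GenFds.GenFfsMakefile(), so the commands can be attached to module makefiles.
#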
def _GenFfsCmd(self):
CmdListDict = {}
GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, self.ArchList, GlobalData)
for Cmd in GenFfsDict:
tmpInf, tmpArch = GenFfsDict[Cmd]
if (tmpInf, tmpArch) not in CmdListDict.keys():
CmdListDict[tmpInf, tmpArch] = [Cmd]
else:
CmdListDict[tmpInf, tmpArch].append(Cmd)
return CmdListDict
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa == None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF files that are listed only in the FDF
if GlobalData.gFdfParser != None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
# Do not run AutoGen for the targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break the build if any build thread has an error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
# In case there's an interruption, we need a full version of the makefile for the platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContiue = time.time()
#
# Save temp tables to a TmpTableDict.
#
for Key in Wa.BuildDatabase._CACHE_:
if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
#
#
# All modules have been put in build tasks queue. Tell task scheduler
# to exit if all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
self.MakeTime += int(round((time.time() - MakeContiue)))
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the configured fixed load address is at or above 4G for 32-bit images.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate the FD image if an FDF file is found
#
GenFdsStart = time.time()
LaunchCommand(Wa.GenFdsCommand, os.getcwd())
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
# Build up the list of supported architectures for this build
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
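# Each entry is written as a '<guid> <tool name> <tool path>' line in GuidedSectionTools.txt.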
# Write out GuidedSectionTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print >> toolsFile, ' '.join(guidedSectionTool)
toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
self.Db.Close()
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
for Module in self.HashSkipModules:
Module.CreateAsBuiltInf(True)
self.BuildModules = []
self.HashSkipModules = []
## Do some clean-up work when an error occurs
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache == None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase == None:
Utils.gDependencyDatabase = {}
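## Parse "NAME[=VALUE]" macro definitions given on the command line into a dictionary
#
# Example (illustrative): ParseDefines(['FOO=1', 'BAR']) returns {'FOO': '1', 'BAR': 'TRUE'}
#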
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList != None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
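# Illustrative sketch (not part of the original tool): ParseDefines() turns the
# repeated "-D Name[=Value]" options into a macro dictionary, with bare names
# defaulting to "TRUE".
def _example_parse_defines():
    macros = ParseDefines(["FOO=1", "BAR"])
    assert macros == {"FOO": "1", "BAR": "TRUE"}
    return macros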
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
def LogBuildTime(Time):
if Time:
TimeDurStr = ''
TimeDur = time.gmtime(Time)
if TimeDur.tm_yday > 1:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur) + ", %d day(s)" % (TimeDur.tm_yday - 1)
else:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur)
return TimeDurStr
else:
return None
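# Illustrative sketch (not part of the original tool): LogBuildTime() formats a
# duration in seconds as "HH:MM:SS", appending a day count for very long builds,
# and returns None for a zero or empty duration.
def _example_log_build_time():
    assert LogBuildTime(0) is None
    assert LogBuildTime(3725) == "01:02:05"   # 1 hour, 2 minutes, 5 seconds
    return LogBuildTime(90000)                # 25 hours -> "01:00:00, 1 day(s)"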
## Parse command line options
#
# Uses the standard Python optparse module to parse the command line options of this tool.
#
# @retval Opt   An optparse.Values object containing the parsed options
# @retval Args  Target of the build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. Less than 2 will disable multi-thread builds.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD','LIBRARY','FLASH','DEPEX','BUILD_FLAGS','FIXED_ADDRESS','HASH','EXECUTION_ORDER'], dest="ReportType", default=[],
help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
"To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
(Opt, Args) = Parser.parse_args()
return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose != None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet != None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug != None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile != None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile != None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile != None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag != None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
# Drop temp tables to avoid database locked.
for TmpTableName in TmpTableDict:
SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
TmpTableDict[TmpTableName].execute(SqlCommand)
#MyBuild.DumpBuildData()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError, X:
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning, X:
# error from Fdf parser
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb != None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild != None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
MyBuild.Db.Close()
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
cclient2.py
|
import socket
import threading
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", 14900))
# Assignment No. 4
# CLIENT
def outgoing():
while True:
message = input()
conn.send(b"Client 2: " + message.encode("utf-8"))
def incoming():
while True:
a = conn.recv(1024)
print(a.decode("utf-8"))
threading.Thread(target=outgoing).start()
threading.Thread(target=incoming).start()
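# Minimal illustrative sketch (an assumption, not part of the original
# assignment): the client above expects a chat server on 127.0.0.1:14900.
# A server like the one below, which relays every message to the other
# connected clients, would be enough for local testing. It is provided only
# as a reference and is never invoked by this client.
def run_test_server(host="127.0.0.1", port=14900):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen()
    clients = []

    def relay(client):
        # Forward each received chunk to every other connected client.
        while True:
            data = client.recv(1024)
            if not data:
                break
            for other in clients:
                if other is not client:
                    other.send(data)

    while True:
        sock, _ = srv.accept()
        clients.append(sock)
        threading.Thread(target=relay, args=(sock,), daemon=True).start()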
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import sys
import time
import types
import signal
import subprocess
import logging
import multiprocessing
import multiprocessing.util
import threading
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
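# Illustrative sketch (not part of salt): notify_systemd() is typically called
# once, after a daemon has finished initialising; it is harmless when the
# python-systemd bindings are missing or the process was not started by systemd.
def _example_notify_ready():
    if notify_systemd():
        log.debug('systemd was notified that this process is ready')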
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except (KeyError, IndexError):  # pwd.getpwnam raises KeyError for an unknown user
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
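# Illustrative sketch (not part of salt): clean_proc() asks a child nicely with
# terminate() and escalates to SIGKILL if it is still alive after roughly
# wait_for_kill * 0.1 seconds.
def _example_clean_proc():
    proc = multiprocessing.Process(target=time.sleep, args=(60,))
    proc.start()
    clean_proc(proc, wait_for_kill=10)  # terminate(), SIGKILL after ~1 second
    assert not proc.is_alive()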
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
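# Illustrative sketch (not part of salt): the pidfile helpers above are
# typically combined to decide whether a previously started daemon is alive.
def _example_daemon_running(pidfile):
    # Hypothetical helper name; returns True only if the pidfile exists and
    # the pid recorded in it maps to a running process.
    return check_pidfile(pidfile) and os_is_running(get_pidfile(pidfile))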
class ThreadPool(object):
'''
This is a very, VERY basic threadpool implementation.
It was written instead of using multiprocessing's ThreadPool because
we want to set a maximum queue size and we want to daemonize threads
(neither is exposed in the stdlib version).
Since there isn't much use for this class right now, this implementation
only supports daemonized threads and will *not* return results.
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of the code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# Intentionally not called "apply_async" since we aren't keeping track of
# the return value at all. If we want to make this API compatible with the
# multiprocessing ThreadPool in the future we can, and we won't have to
# worry about a name collision.
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
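# Illustrative sketch (not part of salt): ThreadPool is fire-and-forget; work
# is submitted with fire_async() and no result is ever returned to the caller.
def _example_thread_pool():
    pool = ThreadPool(num_threads=2, queue_size=10)
    accepted = pool.fire_async(log.info, args=['hello from a worker thread'])
    # fire_async() returns False instead of blocking when the queue is full.
    return accepted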
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
def add_process(self, tgt, args=None, kwargs=None):
'''
Create a process with the given args + kwargs.
This will determine whether tgt is a Process class; otherwise it assumes
it is a function.
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
process.start()
# create a nicer name for the debug log
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}.{1}.{2}'.format(
tgt.__module__,
tgt.__class__,
tgt.__name__,
)
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows():
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
self.restart_process(pid)
else:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
# iterate over a copy, since restart_process() mutates self._process_map
for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# Check that this is the correct process; children inherit this
# handler. If we are in a child, just run the original handler.
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno != 3:
raise
del self._process_map[pid]
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
# SIGKILL any children that are still alive after the grace period
for pid in self._process_map:
try:
os.kill(pid, signal.SIGKILL)
# in case the process has since decided to die, os.kill raises OSError
except OSError:
pass
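# Illustrative sketch (not part of salt): a ProcessManager is normally handed
# its long-lived targets via add_process() and then blocks in run(), restarting
# any child that dies and killing all children when it receives SIGTERM.
def _example_process_manager():
    manager = ProcessManager(name='ExampleManager')
    manager.add_process(time.sleep, args=[3600])  # stand-in for a real worker
    manager.run()  # blocks; respawns the worker whenever it exits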
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __init__(self, *args, **kwargs):
self.log_queue = kwargs.pop('log_queue', salt.log.setup.get_multiprocessing_logging_queue())
multiprocessing.util.register_after_fork(self, MultiprocessingProcess.__setup_process_logging)
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
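# Illustrative sketch (not part of the test suite): TimingWrapper is used by
# the tests below to assert how long blocking calls such as get(), put() and
# wait() actually took.
def _example_timing_wrapper():
    wrapped = TimingWrapper(time.sleep)
    wrapped(0.2)
    return wrapped.elapsed  # roughly 0.2 seconds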
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
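# Illustrative sketch (not part of the test suite): get_value() papers over the
# different attribute names used by the semaphore implementations exercised by
# the mixin classes further below.
def _example_get_value():
    sem = multiprocessing.Semaphore(2)
    try:
        return get_value(sem)   # 2 where sem_getvalue() is supported
    except NotImplementedError:
        return None             # e.g. platforms with a broken sem_getvalue()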
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# This test is specific to the forkserver start method, so skip it
# for the other start methods.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platform as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not work with
        # threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the internal
        # flag instead of None; API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the _DummyList class
# for the same purpose.
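# _DummyList keeps its counter in a shared-memory int (allocated through
# multiprocessing.heap.BufferWrapper) guarded by a multiprocessing.Lock, so
# append() and len() work consistently across processes.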
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
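    # Record the worker in `started`, run the target function, record it in
    # `finished`, then wait on the _can_exit event (set up-front unless
    # wait_before_exit was requested, in which case do_finish() sets it).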
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
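# Picklable callable used as a Barrier `action`: each call appends True to
# the wrapped list-like object.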
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
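    # Start N-1 helpers via Bunch and run `f` in the current process/thread
    # as the Nth barrier party, then wait for the helpers to finish.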
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
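    # Each pass: every party appends to results[0], waits, appends to
    # results[1], and waits again; the length assertions verify that all
    # parties cross each wait() in lockstep.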
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
        Test the return value from barrier.wait()
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
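# CountedObject tracks how many instances are currently alive, so
# test_release_task_refs can verify that the pool releases task arguments
# and results once it is done with them.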
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
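# Yields 0..total-1 but raises SayWhenError at index `when` (or immediately
# if when == -1); used to exercise error propagation in map()/imap().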
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
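# Helpers for the pool worker error tests: raising() always fails, and
# unpickleable_result() returns a lambda that cannot be pickled back to the
# parent process.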
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
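# Proxy type exposing only __next__, so the baz() generator can be iterated
# through the manager.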
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the tuple as a list, not a tuple.
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
        # pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
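    # Child helper: echo every bytes message back over the connection until
    # the SENTINEL (empty string) arrives, then close it.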
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned.  On Windows
        # this sometimes failed on old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
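    # Child helper: receive a file descriptor over `conn` (optionally after
    # filling descriptors 0-255 so the received fd must be large) and write
    # `data` to it.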
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
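    # Child helpers: _listener creates a Listener (and a plain socket) for
    # each family, sends the address to the parent and then sends the
    # accepted connection object itself back; _remote connects to each
    # address it receives and replies with the message upper-cased.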
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
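# ctypes Structure used by _TestSharedCTypes to exercise sharing of
# structured data between processes.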
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
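    # The child registers finalizers and then calls util._exit_function():
    # finalizers with an exitpriority run from highest priority to lowest
    # (later registrations first among equal priorities), while finalizers
    # without an exitpriority (c) only run via del or an explicit call.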
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
        self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
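# Usage sketch (illustrative, not exercised by the tests above): outside the
# test suite the multiprocessing logger is typically enabled with
#     logger = multiprocessing.log_to_stderr()
#     logger.setLevel(logging.INFO)
# which attaches a stderr handler so log records from child processes become
# visible.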
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
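    # Background for the test above: with the 'spawn' and 'forkserver' start
    # methods the child re-imports the main module, so process creation must
    # be guarded, e.g.
    #     if __name__ == '__main__':
    #         multiprocessing.Process(target=work).start()
    # Without the guard the re-import starts children recursively (a fork
    # bomb), which is why the non-'fork' branch expects a RuntimeError.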
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
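    # util.ForkAwareThreadLock registers itself in the after-fork registry so
    # the underlying lock is re-created in each child; the assertion above
    # checks that repeated forking does not keep adding duplicate entries.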
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
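    # Since PEP 475 (Python 3.5) interrupted system calls are retried
    # automatically, so a signal whose handler returns normally must not
    # surface as an error from Connection.send()/recv() or Listener.accept();
    # the two tests above exercise exactly that behaviour.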
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with contextlib.ExitStack() as stack:
if should_die:
stack.enter_context(self.assertWarnsRegex(
UserWarning,
"semaphore_tracker: process died"))
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
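    # The semaphore tracker is a small helper process that records named
    # semaphores created by this process tree and unlinks any that leak when
    # the creating processes die; the tests above check both the leak warning
    # and the implicit restart of a killed tracker.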
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
test_stub.py
|
'''
Create a unified test_stub to share test operations
@author: Youyk
'''
import os
import subprocess
import sys
import time
import threading
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
import zstackwoodpecker.zstack_test.zstack_test_security_group as zstack_sg_header
import zstackwoodpecker.zstack_test.zstack_test_eip as zstack_eip_header
import zstackwoodpecker.zstack_test.zstack_test_vip as zstack_vip_header
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import apibinding.inventory as inventory
import random
Port = test_state.Port
rule1_ports = Port.get_ports(Port.rule1_ports)
rule2_ports = Port.get_ports(Port.rule2_ports)
rule3_ports = Port.get_ports(Port.rule3_ports)
rule4_ports = Port.get_ports(Port.rule4_ports)
rule5_ports = Port.get_ports(Port.rule5_ports)
denied_ports = Port.get_denied_ports()
#rule1_ports = [1, 22, 100]
#rule2_ports = [9000, 9499, 10000]
#rule3_ports = [60000, 60010, 65535]
#rule4_ports = [5000, 5501, 6000]
#rule5_ports = [20000, 28999, 30000]
#test_stub.denied_ports = [101, 4999, 8990, 15000, 30001, 49999]
target_ports = rule1_ports + rule2_ports + rule3_ports + rule4_ports + rule5_ports + denied_ports
def create_vlan_vm(l3_name=None, disk_offering_uuids=None, system_tags=None, session_uuid = None, instance_offering_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'vlan_vm', \
disk_offering_uuids, system_tags=system_tags, \
instance_offering_uuid = instance_offering_uuid,
session_uuid = session_uuid)
def create_lb_vm(l3_name=None, disk_offering_uuids=None, session_uuid = None):
'''
Load Balance VM will only use L3VlanNetworkName6
'''
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
l3_name = os.environ.get('l3VlanNetworkName6')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'vlan_lb_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_sg_vm(l3_name=None, disk_offering_uuids=None, session_uuid = None):
'''
    SG tests need more network commands in the guest, so they need the VR image.
'''
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
#l3_name = 'guestL3VlanNetwork1'
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'vlan_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_windows_vm(l3_name=None, disk_offering_uuids=None, session_uuid = None):
'''
    Create a Windows platform type VM.
'''
image_name = os.environ.get('imageName_windows')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
#l3_name = 'guestL3VlanNetwork1'
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'windows_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_windows_vm_2(l3_name=None, disk_offering_uuids=None, session_uuid = None, instance_offering_uuid = None):
'''
    Create a Windows platform type VM.
'''
image_name = os.environ.get('imageName_windows')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
#l3_name = 'guestL3VlanNetwork1'
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'windows_vm', disk_offering_uuids, instance_offering_uuid = instance_offering_uuid, session_uuid = session_uuid)
def create_other_vm(l3_name=None, disk_offering_uuids=None, session_uuid = None):
'''
    Create an 'other' platform type VM.
'''
image_name = os.environ.get('imageName_other')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
#l3_name = 'guestL3VlanNetwork1'
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'other_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_basic_vm(disk_offering_uuids=None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'basic_no_vlan_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_user_vlan_vm(l3_name=None, disk_offering_uuids=None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
l3_name = os.environ.get('l3NoVlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'user_vlan_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_specified_ps_vm(l3_name=None, ps_uuid=None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
if not l3_name:
l3_name = os.environ.get('l3NoVlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'user_vlan_vm', session_uuid = session_uuid, ps_uuid = ps_uuid)
def create_vlan_sg_vm(disk_offering_uuids=None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'vlan_sg_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_dnat_vm(disk_offering_uuids=None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanDNATNetworkName')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'vlan_sg_vm', disk_offering_uuids, session_uuid = session_uuid)
def create_vm_with_user_args(system_tags = None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
return create_vm([l3_net_uuid], image_uuid, 'user_args_vm', system_tags = system_tags, session_uuid = session_uuid)
# parameter: vmname; l3_net: l3_net_description, or [l3_net_uuid,]; image_uuid:
def create_vm(l3_uuid_list, image_uuid, vm_name = None, \
disk_offering_uuids = None, default_l3_uuid = None, \
system_tags = None, instance_offering_uuid = None, session_uuid = None, ps_uuid=None):
vm_creation_option = test_util.VmOption()
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
if not instance_offering_uuid:
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_l3_uuids(l3_uuid_list)
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_name(vm_name)
vm_creation_option.set_data_disk_uuids(disk_offering_uuids)
vm_creation_option.set_default_l3_uuid(default_l3_uuid)
vm_creation_option.set_system_tags(system_tags)
vm_creation_option.set_session_uuid(session_uuid)
vm_creation_option.set_ps_uuid(ps_uuid)
vm = zstack_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
return vm
def create_vm_with_iso(l3_uuid_list, image_uuid, vm_name = None, root_disk_uuids = None, instance_offering_uuid = None, \
disk_offering_uuids = None, default_l3_uuid = None, system_tags = None, \
session_uuid = None, ps_uuid=None):
vm_creation_option = test_util.VmOption()
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
if not instance_offering_uuid:
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_l3_uuids(l3_uuid_list)
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_name(vm_name)
vm_creation_option.set_root_disk_uuid(root_disk_uuids)
vm_creation_option.set_data_disk_uuids(disk_offering_uuids)
vm_creation_option.set_default_l3_uuid(default_l3_uuid)
vm_creation_option.set_system_tags(system_tags)
vm_creation_option.set_session_uuid(session_uuid)
vm_creation_option.set_ps_uuid(ps_uuid)
vm = zstack_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
return vm
def create_volume(volume_creation_option=None, session_uuid = None):
if not volume_creation_option:
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
volume_creation_option.set_name('vr_test_volume')
volume_creation_option.set_session_uuid(session_uuid)
volume = zstack_volume_header.ZstackTestVolume()
volume.set_creation_option(volume_creation_option)
volume.create()
return volume
def create_sg(sg_creation_option=None, session_uuid = None):
if not sg_creation_option:
sg_creation_option = test_util.SecurityGroupOption()
sg_creation_option.set_name('test_sg')
sg_creation_option.set_description('test sg description')
sg_creation_option.set_session_uuid(session_uuid)
sg = zstack_sg_header.ZstackTestSecurityGroup()
sg.set_creation_option(sg_creation_option)
sg.create()
return sg
def create_vlan_vm_with_volume(l3_name=None, disk_offering_uuids=None, disk_number=None, session_uuid = None):
if not disk_offering_uuids:
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
disk_offering_uuids = [disk_offering.uuid]
if disk_number:
for i in range(disk_number - 1):
disk_offering_uuids.append(disk_offering.uuid)
return create_vlan_vm(l3_name, disk_offering_uuids, \
session_uuid = session_uuid)
def create_eip(eip_name=None, vip_uuid=None, vnic_uuid=None, vm_obj=None, \
session_uuid = None):
if not vip_uuid:
l3_name = os.environ.get('l3PublicNetworkName')
l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vip_uuid = net_ops.acquire_vip(l3_uuid).uuid
eip_option = test_util.EipOption()
eip_option.set_name(eip_name)
eip_option.set_vip_uuid(vip_uuid)
eip_option.set_vm_nic_uuid(vnic_uuid)
eip_option.set_session_uuid(session_uuid)
eip = zstack_eip_header.ZstackTestEip()
eip.set_creation_option(eip_option)
if vnic_uuid and not vm_obj:
        test_util.test_fail('vm_obj cannot be None in the create_eip() API when setting vm_nic_uuid.')
eip.create(vm_obj)
return eip
def create_vip(vip_name=None, l3_uuid=None, session_uuid = None):
if not vip_name:
vip_name = 'test vip'
if not l3_uuid:
l3_name = os.environ.get('l3PublicNetworkName')
l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vip_creation_option = test_util.VipOption()
vip_creation_option.set_name(vip_name)
vip_creation_option.set_l3_uuid(l3_uuid)
vip_creation_option.set_session_uuid(session_uuid)
vip = zstack_vip_header.ZstackTestVip()
vip.set_creation_option(vip_creation_option)
vip.create()
return vip
def create_vr_vm(test_obj_dict, l3_name, session_uuid = None):
'''
'''
vr_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid)
temp_vm = None
if not vrs:
#create temp_vm1 for getting vlan1's vr for test pf_vm portforwarding
temp_vm = create_vlan_vm(l3_name, session_uuid = session_uuid)
test_obj_dict.add_vm(temp_vm)
vr = test_lib.lib_find_vr_by_vm(temp_vm.vm)[0]
temp_vm.destroy(session_uuid)
test_obj_dict.rm_vm(temp_vm)
else:
vr = vrs[0]
if not test_lib.lib_is_vm_running(vr):
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_skip('vr: %s is not running. Will skip test.' % vr.uuid)
return vr
def share_admin_resource(account_uuid_list):
def get_uuid(resource):
temp_list = []
for item in resource:
temp_list.append(item.uuid)
return temp_list
resource_list = []
resource_list.extend(get_uuid(res_ops.get_resource(res_ops.INSTANCE_OFFERING)))
resource_list.extend(get_uuid(res_ops.get_resource(res_ops.IMAGE)))
resource_list.extend(get_uuid(res_ops.get_resource(res_ops.L3_NETWORK)))
resource_list.extend(get_uuid(res_ops.get_resource(res_ops.DISK_OFFERING)))
acc_ops.share_resources(account_uuid_list, resource_list)
def get_vr_by_private_l3_name(l3_name):
vr_l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
vrs = test_lib.lib_find_vr_by_l3_uuid(vr_l3_uuid)
if not vrs:
#create temp_vm for getting vlan1's vr
temp_vm = create_vlan_vm(l3_name)
vr = test_lib.lib_find_vr_by_vm(temp_vm.vm)[0]
temp_vm.destroy()
else:
vr = vrs[0]
return vr
def exercise_parallel(func, ops_num=10, thread_threshold=3):
for ops_id in range(ops_num):
thread = threading.Thread(target=func, args=(ops_id, ))
while threading.active_count() > thread_threshold:
time.sleep(0.5)
exc = sys.exc_info()
thread.start()
    while threading.active_count() > 1:
exc = sys.exc_info()
time.sleep(0.1)
def sleep_util(timestamp):
while True:
if time.time() >= timestamp:
break
time.sleep(0.5)
def create_test_file(vm_inv, test_file):
'''
    The bandwidth is used to calculate the test file size,
    since the test should finish within 60s.
    The bandwidth unit is KB.
'''
file_size = 1024 * 2
seek_size = file_size / 1024 - 1
cmd = 'dd if=/dev/zero of=%s bs=1K count=1 seek=%d' \
% (test_file, seek_size)
if not test_lib.lib_execute_command_in_vm(vm_inv, cmd):
test_util.test_fail('test file is not created')
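    # With file_size = 2048 the dd command above seeks one 1 KiB block and
    # writes a single 1 KiB block, producing a (sparse) file of roughly 2 KiB
    # inside the VM.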
def attach_mount_volume(volume, vm, mount_point):
volume.attach(vm)
import tempfile
script_file = tempfile.NamedTemporaryFile(delete=False)
script_file.write('''
mkdir -p %s
device="/dev/`ls -ltr --file-type /dev | awk '$4~/disk/ {print $NF}' | grep -v '[[:digit:]]' | tail -1`"
mount ${device}1 %s
''' % (mount_point, mount_point))
script_file.close()
vm_inv = vm.get_vm()
if not test_lib.lib_execute_shell_script_in_vm(vm_inv, script_file.name):
test_util.test_fail("mount operation failed in [volume:] %s in [vm:] %s" % (volume.get_volume().uuid, vm_inv.uuid))
os.unlink(script_file.name)
def scp_file_to_vm(vm_inv, src_file, target_file):
vm_ip = vm_inv.vmNics[0].ip
vm_username = test_lib.lib_get_vm_username(vm_inv)
vm_password = test_lib.lib_get_vm_password(vm_inv)
ssh.scp_file(src_file, target_file, vm_ip, vm_username, vm_password)
def make_ssh_no_password(vm_inv):
vm_ip = vm_inv.vmNics[0].ip
ssh.make_ssh_no_password(vm_ip, test_lib.lib_get_vm_username(vm_inv), \
test_lib.lib_get_vm_password(vm_inv))
def create_named_vm(vm_name=None, disk_offering_uuids=None, session_uuid = None):
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
if not vm_name:
vm_name = 'named_vm'
return create_vm([l3_net_uuid], image_uuid, vm_name, disk_offering_uuids, session_uuid = session_uuid)
def time_convert(log_str):
time_str = log_str.split()[0]+' '+log_str.split()[1]
    time_millisecond = time_str.split(',')[1]
    time_str = time_str.split(',')[0]
    time_tuple = time.strptime(time_str, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(time_tuple)*1000+int(time_millisecond))
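# time_convert() assumes management-server.log timestamps look like
# "2017-01-01 12:00:00,123 ..." (comma-separated milliseconds); it returns the
# timestamp as epoch milliseconds so stage durations below can be computed by
# simple subtraction.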
def get_stage_time(vm_name, begin_time):
mn_server_log = "/usr/local/zstacktest/apache-tomcat/logs/management-server.log"
file_obj = open(mn_server_log)
for line in file_obj.readlines():
if line.find('APICreateVmInstanceMsg') != -1 and line.find(vm_name) != -1:
time_stamp = time_convert(line)
if int(time_stamp) >= begin_time:
api_id = line.split('{"', 1)[1].split(',')[-3].split(':')[1].strip('"')
break
    file_obj.close()
log_str = ''
select_bs_time = select_bs_end_time = select_bs_begin_time = 0
allocate_host_time = allocate_host_end_time = allocate_host_begin_time = 0
allocate_ps_time = allocate_ps_end_time = allocate_ps_begin_time = 0
local_storage_allocate_capacity_time = local_storage_allocate_capacity_end_time = local_storage_allocate_capacity_begin_time = 0
allocate_volume_time = allocate_volume_end_time = allocate_volume_begin_time = 0
allocate_nic_time = allocate_nic_end_time = allocate_nic_begin_time = 0
instantiate_res_time = instantiate_res_end_time = instantiate_res_begin_time = 0
instantiate_res_pre_time = instantiate_res_pre_end_time = instantiate_res_pre_begin_time = 0
create_on_hypervisor_time = create_on_hypervisor_end_time = create_on_hypervisor_begin_time = 0
instantiate_res_post_time = instantiate_res_post_end_time = instantiate_res_post_begin_time = 0
file_obj = open(mn_server_log)
for line in file_obj.readlines():
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmImageSelectBackupStorageFlow') != -1 and line.find('start executing flow') != -1:
select_bs_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmImageSelectBackupStorageFlow') != -1 and line.find('successfully executed flow') != -1:
select_bs_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocateHostFlow') != -1 and line.find('start executing flow') != -1:
allocate_host_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocateHostFlow') != -1 and line.find('successfully executed flow') != -1:
allocate_host_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocatePrimaryStorageFlow') != -1 and line.find('start executing flow') != -1:
allocate_ps_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocatePrimaryStorageFlow') != -1 and line.find('successfully executed flow') != -1:
allocate_ps_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('LocalStorageAllocateCapacityFlow') != -1 and line.find('start executing flow') != -1:
local_storage_allocate_capacity_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('LocalStorageAllocateCapacityFlow') != -1 and line.find('successfully executed flow') != -1:
local_storage_allocate_capacity_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocateVolumeFlow') != -1 and line.find('start executing flow') != -1:
allocate_volume_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocateVolumeFlow') != -1 and line.find('successfully executed flow') != -1:
allocate_volume_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocateNicFlow') != -1 and line.find('start executing flow') != -1:
allocate_nic_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmAllocateNicFlow') != -1 and line.find('successfully executed flow') != -1:
allocate_nic_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmInstantiateResourcePreFlow') != -1 and line.find('start executing flow') != -1:
instantiate_res_pre_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmInstantiateResourcePreFlow') != -1 and line.find('successfully executed flow') != -1:
instantiate_res_pre_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmCreateOnHypervisorFlow') != -1 and line.find('start executing flow') != -1:
create_on_hypervisor_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmCreateOnHypervisorFlow') != -1 and line.find('successfully executed flow') != -1:
create_on_hypervisor_end_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmInstantiateResourcePostFlow') != -1 and line.find('start executing flow') != -1:
instantiate_res_post_begin_time = time_convert(line)
if line.find(api_id) != -1 and line.find('SimpleFlowChain') != -1 and line.find('VmInstantiateResourcePostFlow') != -1 and line.find('successfully executed flow') != -1:
instantiate_res_post_end_time = time_convert(line)
file_obj.close()
if select_bs_end_time != 0 and select_bs_begin_time != 0:
select_bs_time = select_bs_end_time - select_bs_begin_time
if allocate_host_end_time != 0 and allocate_host_begin_time != 0:
allocate_host_time = allocate_host_end_time - allocate_host_begin_time
if allocate_ps_end_time != 0 and allocate_ps_begin_time != 0:
allocate_ps_time = allocate_ps_end_time - allocate_ps_begin_time
if local_storage_allocate_capacity_end_time != 0 and local_storage_allocate_capacity_begin_time != 0:
local_storage_allocate_capacity_time = local_storage_allocate_capacity_end_time - local_storage_allocate_capacity_begin_time
if allocate_volume_end_time != 0 and allocate_volume_begin_time != 0:
allocate_volume_time = allocate_volume_end_time - allocate_volume_begin_time
    if allocate_nic_end_time != 0 and allocate_nic_begin_time != 0:
allocate_nic_time = allocate_nic_end_time - allocate_nic_begin_time
if instantiate_res_pre_end_time != 0 and instantiate_res_pre_begin_time != 0:
instantiate_res_pre_time = instantiate_res_pre_end_time - instantiate_res_pre_begin_time
if create_on_hypervisor_end_time != 0 and create_on_hypervisor_begin_time != 0:
create_on_hypervisor_time = create_on_hypervisor_end_time - create_on_hypervisor_begin_time
if instantiate_res_post_end_time != 0 and instantiate_res_post_begin_time != 0:
instantiate_res_post_time = instantiate_res_post_end_time - instantiate_res_post_begin_time
return [select_bs_time, allocate_host_time, allocate_ps_time, local_storage_allocate_capacity_time, allocate_volume_time, allocate_nic_time, instantiate_res_pre_time, create_on_hypervisor_time, instantiate_res_post_time]
def execute_shell_in_process(cmd, timeout=10, logfd=None):
if not logfd:
process = subprocess.Popen(cmd, executable='/bin/sh', shell=True, universal_newlines=True)
else:
process = subprocess.Popen(cmd, executable='/bin/sh', shell=True, stdout=logfd, stderr=logfd, universal_newlines=True)
start_time = time.time()
while process.poll() is None:
curr_time = time.time()
TEST_TIME = curr_time - start_time
if TEST_TIME > timeout:
process.kill()
test_util.test_logger('[shell:] %s timeout ' % cmd)
return False
time.sleep(1)
test_util.test_logger('[shell:] %s is finished.' % cmd)
return process.returncode
def find_ps_local():
ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
for ps in ps_list:
if ps.type == inventory.LOCAL_STORAGE_TYPE:
return ps
test_util.test_logger("Can not find local primary storage ")
return None
def find_ps_nfs():
ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
for ps in ps_list:
if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
return ps
test_util.test_logger("Can not find NFS primary storage ")
return None
def ensure_hosts_connected(wait_time):
for i in range(wait_time):
time.sleep(1)
host_list = res_ops.query_resource(res_ops.HOST)
for host in host_list:
if not "connected" in host.status.lower():
test_util.test_logger("found not connected ps status: %s" %(host.status))
break
else:
return
else:
test_util.test_fail("host status didn't change to Connected within %s, therefore, failed" % (wait_time))
def create_vm_with_random_offering(vm_name, image_name=None, l3_name=None, session_uuid=None,
instance_offering_uuid=None, host_uuid=None, disk_offering_uuids=None,
root_password=None, ps_uuid=None, system_tags=None):
if image_name:
imagename = os.environ.get(image_name)
image_uuid = test_lib.lib_get_image_by_name(imagename).uuid
else:
conf = res_ops.gen_query_conditions('format', '!=', 'iso')
conf = res_ops.gen_query_conditions('system', '=', 'false', conf)
image_uuid = random.choice(res_ops.query_resource(res_ops.IMAGE, conf)).uuid
if l3_name:
l3name = os.environ.get(l3_name)
l3_net_uuid = test_lib.lib_get_l3_by_name(l3name).uuid
else:
l3_net_uuid = random.choice(res_ops.get_resource(res_ops.L3_NETWORK)).uuid
if not instance_offering_uuid:
conf = res_ops.gen_query_conditions('type', '=', 'UserVM')
instance_offering_uuid = random.choice(res_ops.query_resource(res_ops.INSTANCE_OFFERING, conf)).uuid
vm_creation_option = test_util.VmOption()
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name(vm_name)
if system_tags:
vm_creation_option.set_system_tags(system_tags)
if disk_offering_uuids:
vm_creation_option.set_data_disk_uuids(disk_offering_uuids)
if root_password:
vm_creation_option.set_root_password(root_password)
if host_uuid:
vm_creation_option.set_host_uuid(host_uuid)
if session_uuid:
vm_creation_option.set_session_uuid(session_uuid)
if ps_uuid:
vm_creation_option.set_ps_uuid(ps_uuid)
vm = zstack_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
return vm
def create_multi_volumes(count=10, host_uuid=None, ps=None):
volume_list = []
for i in xrange(count):
disk_offering = random.choice(res_ops.get_resource(res_ops.DISK_OFFERING))
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
if ps:
volume_creation_option.set_primary_storage_uuid(ps.uuid)
if ps.type == inventory.LOCAL_STORAGE_TYPE:
if not host_uuid:
host_uuid = random.choice(res_ops.get_resource(res_ops.HOST)).uuid
volume_creation_option.set_system_tags(['localStorage::hostUuid::{}'.format(host_uuid)])
volume = create_volume(volume_creation_option)
volume_list.append(volume)
for volume in volume_list:
volume.check()
if ps:
for volume in volume_list:
assert volume.get_volume().primaryStorageUuid == ps.uuid
return volume_list
def migrate_vm_to_random_host(vm):
test_util.test_dsc("migrate vm to random host")
if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
test_util.test_skip('skip migrate if live migrate not supported')
target_host = test_lib.lib_find_random_host(vm.vm)
current_host = test_lib.lib_find_host_by_vm(vm.vm)
vm.migrate(target_host.uuid)
new_host = test_lib.lib_get_vm_host(vm.vm)
if not new_host:
test_util.test_fail('Not find available Hosts to do migration')
if new_host.uuid != target_host.uuid:
        test_util.test_fail('[vm:] %s did not migrate from [host:] %s to target [host:] %s, but to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid, new_host.uuid))
else:
test_util.test_logger('[vm:] %s has been migrated from [host:] %s to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid))
def generate_pub_test_vm(tbj):
disk_offering_uuids = [random.choice(res_ops.get_resource(res_ops.DISK_OFFERING)).uuid]
l3_name_list = ['l3PublicNetworkName', 'l3NoVlanNetworkName1', 'l3NoVlanNetworkName2']
pub_l3_vm, flat_l3_vm, vr_l3_vm = [create_vm_with_random_offering(vm_name='test_vm',
image_name='imageName_net',
disk_offering_uuids=random.choice([None, disk_offering_uuids]),
l3_name=name) for name in l3_name_list]
for vm in pub_l3_vm, flat_l3_vm, vr_l3_vm:
vm.check()
tbj.add_vm(vm)
return pub_l3_vm, flat_l3_vm, vr_l3_vm
|
train.py
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import shutil
import time
import argparse
import importlib
import threading
import traceback
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm
from py3nvml.py3nvml import *
from torch.multiprocessing import Process, Queue, Pool
from core.dbs import datasets
from core.test import test_func
from core.utils import stdout_to_tqdm, pLogger
from core.config import SystemConfig
from core.sample import data_sampling_func
from core.nnet.py_factory import NetworkFactory
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
date = time.strftime('%Y-%m-%d-%H-%M', time.localtime())
def parse_args():
parser = argparse.ArgumentParser(description="Training Script")
parser.add_argument("cfg_file", help="config file", type=str)
parser.add_argument("--iter", dest="start_iter",
help="train at iteration i",
default=0, type=int)
parser.add_argument("--workers", default=4, type=int)
parser.add_argument("--initialize", action="store_true")
parser.add_argument("--distributed", action="store_true")
parser.add_argument("--world-size", default=-1, type=int,
help="number of nodes of distributed training")
parser.add_argument("--rank", default=0, type=int,
help="node rank for distributed training")
parser.add_argument("--dist-url", default=None, type=str,
help="url used to set up distributed training")
parser.add_argument("--dist-backend", default="nccl", type=str)
args = parser.parse_args()
return args
def prefetch_data(train_logger, system_config, db, queue, sample_data, data_aug):
ind = 0
train_logger.train_logging("start prefetching data...")
np.random.seed(os.getpid())
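    # Seed NumPy from the process id so each prefetching worker draws a different
    # random sequence of samples.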
while True:
try:
data, ind = sample_data(system_config, db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
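# Pinning batches into page-locked host memory allows asynchronous host-to-GPU copies.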
def _pin_memory(ts):
if type(ts) is list:
return [t.pin_memory() for t in ts]
return ts.pin_memory()
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
data["xs"] = [_pin_memory(x) for x in data["xs"]]
data["ys"] = [_pin_memory(y) for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
def init_parallel_jobs(train_logger, system_config, dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(train_logger, system_config, db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def terminate_tasks(tasks):
for task in tasks:
task.terminate()
def make_dirs(directories):
for directory in directories:
if not os.path.exists(directory):
os.makedirs(directory)
def train(train_logger, training_dbs, validation_db, system_config, model, args):
# reading arguments from command
start_iter = args.start_iter
distributed = args.distributed
world_size = args.world_size
initialize = args.initialize
gpu = args.gpu
rank = args.rank
# reading arguments from json file
batch_size = system_config.batch_size
learning_rate = system_config.learning_rate
max_iteration = system_config.max_iter
pretrained_model = system_config.pretrain
snapshot = system_config.snapshot
val_iter = system_config.val_iter
display = system_config.display
decay_rate = system_config.decay_rate
stepsize = system_config.stepsize
train_logger.train_logging("Process {}: building model...".format(rank))
nnet = NetworkFactory(system_config, model, distributed=distributed, gpu=gpu)
if initialize:
nnet.save_params(0)
exit(0)
# queues storing data for training
training_queue = Queue(system_config.prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(system_config.prefetch_size)
pinned_validation_queue = queue.Queue(5)
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(train_logger, system_config, training_dbs, training_queue, data_sampling_func, True)
if val_iter:
validation_tasks = init_parallel_jobs(train_logger, system_config, [validation_db], validation_queue, data_sampling_func, False)
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
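    # Both semaphores start in the acquired state, so the pin_memory threads keep
    # running; releasing them at the end of training makes the threads' non-blocking
    # acquire succeed and lets them exit.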
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
train_logger.train_logging("Process {}: loading from pretrained model".format(rank))
nnet.load_pretrained_params(pretrained_model)
if start_iter:
nnet.load_params(start_iter)
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.set_lr(learning_rate)
train_logger.train_logging("Process {}: training starts from iteration {} with learning_rate {}".format(rank, start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
if rank == 0:
train_logger.train_logging("training start...")
nnet.cuda()
nnet.train_mode()
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = nnet.train(**training)
train_logger.tb_logging('Train/loss', {'tloss': training_loss.item()}, iteration)
if display and iteration % display == 0:
train_logger.train_logging("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
del training_loss
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
# calculate validation loss
validation = pinned_validation_queue.get(block=True)
validation_loss = nnet.validate(**validation)
train_logger.train_logging("Process {}: validation loss at iteration {}: {}".format(rank, iteration, validation_loss.item()))
train_logger.tb_logging('Val/loss', {'vloss': validation_loss.item()}, iteration)
nnet.train_mode()
if iteration % snapshot == 0 and rank == 0:
nnet.eval_mode()
# calculate validation mAP
val_split = system_config.val_split
mAP, _, detect_average_time = test(validation_db, system_config, nnet, val_iter, val_split, debug=True)
train_logger.train_logging("Process {}: mAP at iteration {}: {}".format(rank, iteration, mAP))
train_logger.train_logging("Detect average time: {}".format(detect_average_time))
nnet.train_mode()
if iteration % snapshot == 0 and rank == 0:
nnet.save_params(iteration)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# dc = 0
# handle = nvmlDeviceGetHandleByIndex(dc)
# res = nvmlDeviceGetUtilizationRates(handle)
# gpu_util = res.gpu
# res = nvmlDeviceGetMemoryInfo(handle)
# gpu_mem = res.used / 1024 / 1024
# train_logger.tb_logging('data/NV', {'gpu-util': gpu_util, 'gpu-mem': gpu_mem}, iteration)
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
terminate_tasks(training_tasks)
    if val_iter:
        terminate_tasks(validation_tasks)
def test(db, system_config, nnet, val_iter, split, debug=False, suffix=None):
    result_dir = os.path.join(system_config.result_dir, str(val_iter), split)
    if suffix is not None:
        result_dir = os.path.join(result_dir, suffix)
    make_dirs([result_dir])
    a, b, detect_average_time = test_func(system_config, db, nnet, result_dir, debug=debug)
    return a, b, detect_average_time
def main(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.distributed:
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
rank = args.rank
cfg_file = os.path.join("./configs", args.cfg_file + ".json")
with open(cfg_file, "r") as f:
config = json.load(f)
config["system"]["snapshot_name"] = args.cfg_file
system_config = SystemConfig().update_config(config["system"])
model_file = "core.models.{}".format(args.cfg_file)
model_file = importlib.import_module(model_file)
model = model_file.model(num_classes=config["db"]["categories"])
train_split = system_config.train_split
val_split = system_config.val_split
ckpt_path = os.path.join('cache/nnet/', args.cfg_file, date)
train_logger = pLogger(ckpt_path)
if not os.path.exists(ckpt_path):
os.makedirs(os.path.join(ckpt_path))
shutil.copyfile('{}'.format(cfg_file), '{}/{}'.format(ckpt_path, args.cfg_file + ".json"))
train_logger.train_logging("Process {}: loading all datasets...".format(rank))
dataset = system_config.dataset
workers = args.workers
train_logger.train_logging("Process {}: using {} workers".format(rank, workers))
training_dbs = [datasets[dataset](config["db"], split=train_split, sys_config=system_config) for _ in range(workers)]
validation_db = datasets[dataset](config["db"], split=val_split, sys_config=system_config)
if rank == 0:
print("system config...")
pprint.pprint(system_config.full)
train_logger.train_logging("system config...")
train_logger.train_logging(system_config.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
train_logger.train_logging("db config...")
train_logger.train_logging(training_dbs[0].configs)
train_logger.train_logging("len of db: {}".format(len(training_dbs[0].db_inds)))
train_logger.train_logging("distributed: {}".format(args.distributed))
train(train_logger, training_dbs, validation_db, system_config, model, args)
if __name__ == "__main__":
args = parse_args()
distributed = args.distributed
world_size = args.world_size
    if distributed and world_size < 1:
raise ValueError("world size must be greater than 0 in distributed training")
ngpus_per_node = torch.cuda.device_count()
if distributed:
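        # world_size was given as the number of nodes; scale it up to the total
        # number of processes, one per GPU on each node.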
args.world_size = ngpus_per_node * args.world_size
mp.spawn(main, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main(None, ngpus_per_node, args)
|
EIH_Attenuation_Sim_Main.py
|
from os import getpid
from multiprocessing import Pool, Queue, cpu_count, get_context
from queue import Empty
import time
from ErgodicHarvestingLib.EIH_API_Sim_Entropy import EIH_Sim
from ErgodicHarvestingLib.utils import print_color
def QueueWorker(mp_queue):
while True:
try:
args, remaining_jobs = mp_queue.get(block=True, timeout=5.0)
print_color(
f"[WorkerNode-{getpid()}] Submitting new job {args[3]}", color="cyan"
)
EIH_Sim(*args)
except Empty:
print_color(
f"[WorkerNode-{getpid()}] no more work to be done, existing",
color="yellow",
)
return
except Exception:
raise
def EID_Attenuation_Sim(simDataFile, nThread):
# Read simulation jobs
f = open(simDataFile, "r")
trials = f.readlines()
nSimTrials = len(trials)
# Submit simulations
if nThread > cpu_count():
nThread = cpu_count()
if nSimTrials < nThread:
nThread = nSimTrials
# Start a new parallel pool
print("Starting parallel pool with {0} threads".format(nThread))
ctx = get_context("fork")
pool = Pool(processes=nThread)
max_queue_size = min(2 * nThread, nSimTrials)
work_queue = Queue(maxsize=max_queue_size)
jobs = []
remaining_jobs = nSimTrials
# Kick off worker threads
for _ in range(nThread):
# Start a new job thread
try:
p = pool.Process(target=QueueWorker, args=(work_queue,))
except Exception:
if ctx is not None:
# Fallback to use context
p = ctx.Process(target=QueueWorker, args=(work_queue,))
p.start()
jobs.append(p)
for it in range(nSimTrials):
# Fill in work queue
work_queue.put((trials[it].split(), remaining_jobs), block=True, timeout=None)
remaining_jobs -= 1
print_color(
f"[MasterNode-{getpid()}]: Adding new job {trials[it].split()[3]}, "
f"remaining jobs {remaining_jobs}",
color="green",
)
# Unfortunately we need to wait briefly before adding new data into the queue.
# This is because it takes some time for the object to get properly ingested.
time.sleep(0.1)
# Wait until all the active thread to finish
for job in jobs:
job.join()
|
__init__.py
|
import os
import struct
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
class Dataset(ABC):
"""
Abstract data interface class.
"""
@abstractmethod
def __init__(self):
"""
Must define:
self.X -- numpy array of data samples;
self.y -- numpy array of labels;
self.names -- dictionary of names for each value of label
self.shape -- shape of the raw data
"""
super().__init__()
self.X = None
self.y = None
self.names = {}
self.shape = None
def __getitem__(self, id):
"""
Returns:
X, y -- data sample and label by given index
"""
return self.X[id], self.y[id]
def X(self):
"""
Returns:
X -- data samples
"""
return self.X
def y(self):
"""
Returns:
y -- data labels
"""
return self.y
def __len__(self):
"""
Returns:
n -- number of samples in the dataset
"""
if self.X.shape[0] != self.y.shape[0]:
raise RuntimeError("Data samples and labels sizes differ {} and {}, but must be the same".format(self.X.shape[0], self.y.shape[0]))
return self.X.shape[0]
from multiprocessing import Process, Pool, Queue
import time
import progressbar
class LargePool:
"""
Multiprocessing with progressbar.
"""
def __init__(self, tasks, worker_class, worker_args=(), worker_kwargs={}, message='Loading '):
self.tasks = tasks
self.worker_class = worker_class
self.worker_args = worker_args
self.worker_kwargs = worker_kwargs
self.message = message
def run(self, processes=None, progress=True, delay=0.2):
tasks = Queue()
size = len(self.tasks)
results = Queue(maxsize=size)
def init():
worker = self.worker_class(*self.worker_args, **self.worker_kwargs)
while True:
t = tasks.get()
results.put(worker(t))
def load_queue():
for t in self.tasks:
tasks.put(t)
p = Process(target=load_queue)
p.start()
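        # Feed the task queue from a helper process so run() can start the worker
        # pool and the progress loop while tasks are still being enqueued.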
pool = Pool(processes=processes, initializer=init)
if progress:
with progressbar.ProgressBar(max_value=size, prefix=self.message) as bar:
while not results.full():
bar.update(results.qsize())
time.sleep(delay)
res = [results.get() for i in range(size)]
p.terminate()
pool.terminate()
return [r for r in res if r is not None]
class Worker(ABC):
@abstractmethod
def __init__(self):
super().__init__()
pass
@abstractmethod
def __call__(self, task):
pass
def load_mnist_raw(path, kind):
"""
Load image/labels data packed as http://yann.lecun.com/exdb/mnist/.
Arguments:
path -- path to the loaded file
kind -- kind of the file contents:
'l' = labels
'i' = images
Returns:
data -- loaded data as numpy array
"""
with open(path, 'rb') as f:
if kind == 'l':
magic, n = struct.unpack('>ii', f.read(8))
data = np.fromfile(f, dtype=np.uint8)
elif kind == 'i':
magic, num, rows, cols = struct.unpack(">iiii", f.read(16))
data = np.fromfile(f, dtype=np.uint8).reshape(num, rows*cols)
else:
raise RuntimeError("Unsupported file contents kind: '{}'".format(kind))
return data
def load_mnist_like(folder='mnist'):
"""
Load MNIST(F-MNIST) dataset.
Returns:
X, y -- data points and labels
"""
train = {'i': 'data/{}/train-images-idx3-ubyte'.format(folder),
'l': 'data/{}/train-labels-idx1-ubyte'.format(folder)}
test = {'i': 'data/{}/t10k-images-idx3-ubyte'.format(folder),
'l': 'data/{}/t10k-labels-idx1-ubyte'.format(folder)}
files = [train, test]
storage = {'i': None,
'l': None}
for f in files:
for kind in storage:
arr = load_mnist_raw(f[kind], kind)
if storage[kind] is None:
storage[kind] = arr
else:
storage[kind] = np.concatenate((storage[kind], arr))
return storage['i'], storage['l']
class MNIST(Dataset):
"""
MNIST Dataset
Alias: mnist
http://yann.lecun.com/exdb/mnist/
"""
def __init__(self):
super().__init__()
self.X, self.y = load_mnist_like('mnist')
        self.names = {k:str(k) for k in range(self.y.max() + 1)}
self.shape = (28, 28)
class FMNIST(Dataset):
"""
Fashion MNIST Dataset
Alias: fmnist
https://github.com/zalandoresearch/fashion-mnist
"""
def __init__(self):
super().__init__()
self.X, self.y = load_mnist_like('fmnist')
self.names = {
0: "T-shirt/top",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle boot"
}
self.shape = (28, 28)
class Iris(Dataset):
"""
Iris Dataset
https://archive.ics.uci.edu/ml/datasets/Iris
"""
def __init__(self):
super().__init__()
df = pd.read_csv("data/iris/iris.data", header=None)
self.X = df.iloc[:, :-1].values
classes = df.iloc[:, -1].astype("category").cat
self.y = classes.codes.values
self.names = dict(enumerate(classes.categories))
for k in self.names:
self.names[k] = self.names[k].rsplit('-', 1)[1].title()
self.shape = (self.X.shape[1], )
import re
import imageio
class CoilLoader(Worker):
def __init__(self, path):
super().__init__()
self.pattern = re.compile(r'obj(\d+)__(\d+).png')
self.path = path
def __call__(self, file):
match = self.pattern.match(file)
if match:
obj = match.group(1)
res = imageio.imread(os.path.join(self.path, file)).ravel(), int(obj)-1
return res
def load_coil_like(path):
"""
Load COIL-20(COIL-100) dataset.
Returns:
X, y -- data points and labels
"""
for _, _, f in os.walk(path):
fs = f
break
p = LargePool(fs, CoilLoader, (path,))
res = p.run()
X = []
y = []
for r in res:
X.append(r[0])
y.append(r[1])
return np.stack(X), np.stack(y)
class COIL20(Dataset):
"""
COIL-20 Dataset
Alias: coil20
http://www.cs.columbia.edu/CAVE/software/softlib/coil-20.php
"""
def __init__(self):
super().__init__()
self.X, self.y = load_coil_like('data/coil20/coil-20-proc')
        self.names = {k:'Object ' + str(k) for k in range(self.y.max() + 1)}
self.shape = (128, 128)
class COIL100(Dataset):
"""
COIL-100 Dataset
Alias: coil100
http://www1.cs.columbia.edu/CAVE/software/softlib/coil-100.php
"""
def __init__(self):
super().__init__()
self.X, self.y = load_coil_like('data/coil100/coil-100')
        self.names = {k:'Object ' + str(k) for k in range(self.y.max() + 1)}
self.shape = (128, 128, 3)
class PenDigits(Dataset):
"""
Pen Digits Dataset
Alias: pendigits
https://archive.ics.uci.edu/ml/datasets/optical+recognition+of+handwritten+digits
"""
def __init__(self):
super().__init__()
files = ["data/pendigits/optdigits.tes",
"data/pendigits/optdigits.tra"]
loaded = [None]*2
for f in files:
df = pd.read_csv(f, header=None)
for i in range(2):
if i == 0:
new = df.iloc[:, :-1].values
else:
new = df.iloc[:, -1].values
if loaded[i] is None:
loaded[i] = new
else:
loaded[i] = np.concatenate((loaded[i], new))
self.X, self.y = loaded
        self.names = {k:str(k) for k in range(self.y.max() + 1)}
self.shape = (8, 8)
from io import StringIO
class CsvLoader(Worker):
def __init__(self, sep='\t'):
super().__init__()
self.sep = sep
def __call__(self, text):
csv = StringIO(text)
return pd.read_csv(csv, sep=self.sep, header=None, engine='c')
class CsvReader:
def __init__(self, path, nlines, chunksize=1024):
super().__init__()
self.nlines = nlines
self.chunksize = chunksize
self.path = path
def __iter__(self):
nlines = 0
nread = 0
text = ''
with open(self.path) as f:
f.readline()
for line in f:
if nlines == self.nlines:
break
nlines += 1
nread += 1
text += line + '\n'
if nread == self.chunksize:
yield text
nread = 0
text = ''
yield text
def __len__(self):
return (self.nlines+self.chunksize-1)//self.chunksize
class ScRNA(Dataset):
"""
Mouse scRNA-seq Dataset
Alias: scrna
https://hemberg-lab.github.io/scRNA.seq.datasets/mouse/brain/
"""
def __init__(self):
super().__init__()
# Load labels
df = pd.read_csv('data/scrna/GSE93374_cell_metadata.txt', sep='\t')
classes = df.iloc[:, 6].astype('category').cat
name_to_class = dict(zip(df.iloc[:, 0], classes.codes.values))
self.names = dict(enumerate(classes.categories))
df = pd.read_csv('data/scrna/GSE93374_Merged_all_020816_DGE.txt', sep='\t', nrows=1)
ind_to_name = df.columns.values
        self.y = np.empty(len(ind_to_name), dtype=int)
for i in range(len(ind_to_name)):
self.y[i] = name_to_class[ind_to_name[i]]
# Load the data itself
path = 'data/scrna/GSE93374_Merged_all_020816_DGE.txt'
nlines = 26774
reader = CsvReader(path, nlines=nlines, chunksize=1024)
p = LargePool(reader, CsvLoader, ('\t',))
df = pd.concat(p.run())
self.X = df.iloc[:, 1:].values.T
self.shape = (nlines,)
class Shuttle(Dataset):
"""
Statlog (Shuttle) Dataset
Alias: shuttle
https://archive.ics.uci.edu/ml/datasets/Statlog+(Shuttle)
"""
def __init__(self, drop_time=True):
super().__init__()
base = "data/shuttle/shuttle."
exts = ["trn", "tst"]
vals = {'X': None,
'y': None}
for ext in exts:
df = pd.read_csv(base+ext, sep=' ').values
new = {'X': df[:, 1:-1] if drop_time else df[:, :-1],
'y': df[:, -1]}
for k in new:
if vals[k] is None:
vals[k] = new[k]
else:
vals[k] = np.concatenate((vals[k], new[k]))
self.X, self.y = vals['X'], vals['y']
self.names = {
1: 'Rad Flow',
2: 'Fpv Close',
3: 'Fpv Open',
4: 'High',
5: 'Bypass',
6: 'Bpv Close',
7: 'Bpv Open'
}
self.shape = (self.X.shape[1],)
|
__init__.py
|
import logging
import os
import subprocess
import sys
import threading
from typing import Optional, List, Tuple
import requests
from orangeshare import Config
from orangeshare.temp_dir import temp_dir
from orangeshare.updater.UpdatePopup import UpdatePopup
class Updater:
_instance = None
@staticmethod
def get_updater() -> 'Updater':
"""
Get the updater (singleton)
:return: The updater
"""
if Updater._instance is None:
Updater()
return Updater._instance
def __init__(self):
"""
Creates a new Updater instance.
Should not be called.
Instead use get_updater.
"""
if Updater._instance is not None:
raise Exception("Updater is a singleton")
else:
Updater._instance = self
self.newer_version_available: Optional[bool] = None
self.newer_version: Optional[str] = None
self.newer_version_executables: List[List[str]] = []
def get_github_version(self) -> Tuple[str, List[List[str]]]:
"""
Checks if a newer Version of Orange Share is available using the github API.
:return The version and the executables from the latest release
"""
response = requests.get("https://api.github.com/repos/Yannis4444/orange-share/releases/latest").json()
version = response["tag_name"].replace("v", "")
logging.info("got newest available version from GitHub: {}".format(version))
# get the executables
newer_version_executables = []
for asset in response["assets"]:
if asset["name"].endswith(".exe"):
newer_version_executables.append([asset["name"], asset["browser_download_url"]])
return version, newer_version_executables
    def get_pip_version(self) -> str:
"""
Checks if a newer Version of Orange Share is available using the PyPi API.
:return The version
"""
response = requests.get("https://pypi.org/pypi/orangeshare/json").json()
version = response["info"]["version"]
logging.info("got newest available version from PyPi: {}".format(version))
return version
def check_for_new_version(self, orange_share, current):
"""
Checks if a newer Version of Orange Share is available using the github API.
Will run the request as a Thread to avoid blocking.
Result will be written to self.newer_version_available.
True if a newer Version is available, False if not or the request failed
will open the pop up if a new version is available
:param orange_share: the orangeshare instance
:param current: the current version
"""
if self.newer_version_available is not None:
# already run
return
def get_version(updater, orange_share):
try:
if "--windows-installation" in sys.argv:
version, self.newer_version_executables = self.get_github_version()
else:
version = self.get_pip_version()
                available_version = [int(x) for x in version.split(".")]
                current_version = [int(x) for x in current.split(".")]
                # compare version components numerically so that e.g. 0.10.0 ranks above 0.9.9
                updater.newer_version_available = available_version > current_version
if updater.newer_version_available:
logging.info("there is a newer version available")
updater.newer_version = version
# check if popup has to be opened
config = Config.get_config()
ignored_version = config.config.get("UPDATE", "ignore", fallback="")
if ignored_version != updater.newer_version:
UpdatePopup(orange_share, updater.newer_version)
except Exception as e:
logging.info("could not check if newer version is available: {}".format(e))
threading.Thread(target=get_version, args=(self, orange_share,)).start()
def windows_update(self) -> Tuple[bool, Optional[str]]:
"""
Downloads the newest executable from GitHub and runs it.
:return True if successful and an optional error string
"""
logging.info("downloading new version")
updater = Updater.get_updater()
# choose the right executable
url = ""
name = ""
for n, u in updater.newer_version_executables:
if n == f"orangeshare-{updater.newer_version}.exe":
url = u
name = n
break
if not url:
return False, "No installer found in newest Release"
r = requests.get(url)
filename = os.path.join(temp_dir.name, name)
with open(filename, 'wb') as output_file:
output_file.write(r.content)
logging.info("download complete")
logging.info("running updater")
subprocess.Popen([filename, "/SILENT"])
return True, None
def pip_update(self) -> Tuple[bool, Optional[str]]:
"""
Runs pip install --upgrade orangeshare.
:return True if successful and an optional error string
"""
cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "orangeshare"]
logging.info("running {}".format(" ".join(cmd)))
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
logging.error("failed to upgrade from pip: {}".format(e))
return False, str(e)
logging.info("successfully upgraded orangeshare")
return True, None
|
LocalDispatcher.py
|
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import errno
import signal
import subprocess
import threading
import time
import traceback
import Gaffer
import IECore
class LocalDispatcher( Gaffer.Dispatcher ) :
def __init__( self, name = "LocalDispatcher", jobPool = None ) :
Gaffer.Dispatcher.__init__( self, name )
backgroundPlug = Gaffer.BoolPlug( "executeInBackground", defaultValue = False )
self.addChild( backgroundPlug )
self.addChild( Gaffer.BoolPlug( "ignoreScriptLoadErrors", defaultValue = False ) )
self.__jobPool = jobPool if jobPool else LocalDispatcher.defaultJobPool()
class Job :
Status = IECore.Enum.create( "Waiting", "Running", "Complete", "Failed", "Killed" )
def __init__( self, batch, dispatcher, name, jobId, directory ) :
assert( isinstance( batch, Gaffer.Dispatcher._TaskBatch ) )
assert( isinstance( dispatcher, Gaffer.Dispatcher ) )
self.__batch = batch
## \todo Stop storing this. It's just a temptation to access potentially
# invalid data during background dispatches - all dispatcher settings _must_
# be copied to the job upon construction, because nothing stops a user changing
# the dispatcher settings during a background dispatch. Currently __dispatcher
# is used to access the JobPool in __reportCompleted etc - instead the job should
# use signals to report changes in status, and the JobPool should connect to those
# signals. Jobs should be blissfully ignorant of JobPools.
self.__dispatcher = dispatcher
script = batch.requirements()[0].node().scriptNode()
self.__context = Gaffer.Context( script.context() )
self.__name = name
self.__id = jobId
self.__directory = directory
self.__stats = {}
self.__ignoreScriptLoadErrors = dispatcher["ignoreScriptLoadErrors"].getValue()
self.__messageHandler = IECore.CapturingMessageHandler()
self.__messageTitle = "%s : Job %s %s" % ( self.__dispatcher.getName(), self.__name, self.__id )
scriptFileName = script["fileName"].getValue()
self.__scriptFile = os.path.join( self.__directory, os.path.basename( scriptFileName ) if scriptFileName else "untitled.gfr" )
script.serialiseToFile( self.__scriptFile )
self.__storeNodeNames( script, batch )
self.__setStatus( batch, LocalDispatcher.Job.Status.Waiting, recursive = True )
def name( self ) :
return self.__name
def id( self ) :
return self.__id
def directory( self ) :
return self.__directory
def description( self ) :
batch = self.__currentBatch( self.__batch )
if batch is None or batch.node() is None :
return "N/A"
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
return "Executing " + batch.blindData()["nodeName"].value + " on frames " + frames
def statistics( self ) :
batch = self.__currentBatch( self.__batch )
if batch is None or "pid" not in batch.blindData().keys() :
return {}
rss = 0
pcpu = 0.0
pid = batch.blindData().get( "pid" )
try :
stats = subprocess.Popen( ( "ps -Ao pid,ppid,pgid,sess,pcpu,rss" ).split( " " ), stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate()[0].split()
for i in range( 0, len(stats), 6 ) :
if str(pid) in stats[i:i+4] :
pcpu += float(stats[i+4])
rss += float(stats[i+5])
except :
return {}
return {
"pid" : pid,
"pcpu" : pcpu,
"rss" : rss,
}
def messageHandler( self ) :
return self.__messageHandler
def execute( self, background = False ) :
if background :
with self.__messageHandler :
if not self.__preBackgroundDispatch( self.__batch ) :
return
threading.Thread( target = self.__backgroundDispatch ).start()
else :
with self.__messageHandler :
self.__foregroundDispatch( self.__batch )
self.__reportCompleted( self.__batch )
def failed( self ) :
return self.__getStatus( self.__batch ) == LocalDispatcher.Job.Status.Failed
def kill( self ) :
if not self.failed() :
self.__kill( self.__batch )
def killed( self ) :
return "killed" in self.__batch.blindData().keys()
def _fail( self ) :
self.__setStatus( self.__batch, LocalDispatcher.Job.Status.Failed )
def __kill( self, batch ) :
# this doesn't set the status to Killed because that could
# run into a race condition with a background dispatch.
batch.blindData()["killed"] = IECore.BoolData( True )
for requirement in batch.requirements() :
self.__kill( requirement )
def __foregroundDispatch( self, batch ) :
for currentBatch in batch.requirements() :
if not self.__foregroundDispatch( currentBatch ) :
return False
if batch.blindData().get( "killed" ) :
self.__reportKilled( batch )
return False
if not batch.node() or self.__getStatus( batch ) == LocalDispatcher.Job.Status.Complete :
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
return True
description = "executing %s on %s" % ( batch.blindData()["nodeName"].value, str(batch.frames()) )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, description )
try :
self.__setStatus( batch, LocalDispatcher.Job.Status.Running )
batch.execute()
except :
traceback.print_exc()
self.__reportFailed( batch )
return False
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
return True
def __preBackgroundDispatch( self, batch ) :
if batch.node() and batch.node()["dispatcher"]["local"]["executeInForeground"].getValue() :
if not self.__foregroundDispatch( batch ) :
return False
else :
for currentBatch in batch.requirements() :
if not self.__preBackgroundDispatch( currentBatch ) :
return False
return True
def __backgroundDispatch( self ) :
with self.__messageHandler :
self.__doBackgroundDispatch( self.__batch )
def __doBackgroundDispatch( self, batch ) :
if self.__getStatus( batch ) == LocalDispatcher.Job.Status.Complete :
return True
for currentBatch in batch.requirements() :
if not self.__doBackgroundDispatch( currentBatch ) :
return False
if batch.blindData().get( "killed" ) :
self.__reportKilled( batch )
return False
if not batch.node() :
self.__reportCompleted( batch )
return True
if isinstance( batch.node(), Gaffer.TaskList ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, "Finished " + batch.blindData()["nodeName"].value )
return True
taskContext = batch.context()
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
args = [
"gaffer", "execute",
"-script", self.__scriptFile,
"-nodes", batch.blindData()["nodeName"].value,
"-frames", frames,
]
if self.__ignoreScriptLoadErrors :
args.append( "-ignoreScriptLoadErrors" )
contextArgs = []
for entry in [ k for k in taskContext.keys() if k != "frame" and not k.startswith( "ui:" ) ] :
if entry not in self.__context.keys() or taskContext[entry] != self.__context[entry] :
contextArgs.extend( [ "-" + entry, repr(taskContext[entry]) ] )
if contextArgs :
args.extend( [ "-context" ] + contextArgs )
self.__setStatus( batch, LocalDispatcher.Job.Status.Running )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, " ".join( args ) )
process = subprocess.Popen( args, preexec_fn=os.setsid )
batch.blindData()["pid"] = IECore.IntData( process.pid )
while process.poll() is None :
if batch.blindData().get( "killed" ) :
os.killpg( process.pid, signal.SIGTERM )
self.__reportKilled( batch )
return False
time.sleep( 0.01 )
if process.returncode :
self.__reportFailed( batch )
return False
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
return True
def __getStatus( self, batch ) :
return LocalDispatcher.Job.Status( batch.blindData().get( "status", IECore.IntData( int(LocalDispatcher.Job.Status.Waiting) ) ).value )
def __setStatus( self, batch, status, recursive = False ) :
batch.blindData()["status"] = IECore.IntData( int(status) )
if recursive :
for requirement in batch.requirements() :
self.__setStatus( requirement, status, recursive = True )
def __reportCompleted( self, batch ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Complete )
self.__dispatcher.jobPool()._remove( self )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, "Dispatched all tasks for " + self.name() )
def __reportFailed( self, batch ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Failed )
self.__dispatcher.jobPool()._fail( self )
frames = str( IECore.frameListFromList( [ int(x) for x in batch.frames() ] ) )
IECore.msg( IECore.MessageHandler.Level.Error, self.__messageTitle, "Failed to execute " + batch.blindData()["nodeName"].value + " on frames " + frames )
def __reportKilled( self, batch ) :
self.__setStatus( batch, LocalDispatcher.Job.Status.Killed )
self.__dispatcher.jobPool()._remove( self )
IECore.msg( IECore.MessageHandler.Level.Info, self.__messageTitle, "Killed " + self.name() )
def __currentBatch( self, batch ) :
if self.__getStatus( batch ) == LocalDispatcher.Job.Status.Running :
return batch
for requirement in batch.requirements() :
batch = self.__currentBatch( requirement )
if batch is not None :
return batch
return None
def __storeNodeNames( self, script, batch ) :
if batch.node() :
batch.blindData()["nodeName"] = batch.node().relativeName( script )
for requirement in batch.requirements() :
self.__storeNodeNames( script, requirement )
class JobPool( IECore.RunTimeTyped ) :
def __init__( self ) :
self.__jobs = []
self.__failedJobs = []
self.__jobAddedSignal = Gaffer.Signal1()
self.__jobRemovedSignal = Gaffer.Signal1()
self.__jobFailedSignal = Gaffer.Signal1()
def jobs( self ) :
return list(self.__jobs)
def failedJobs( self ) :
return list(self.__failedJobs)
def waitForAll( self ) :
while len(self.__jobs) :
time.sleep( 0.2 )
def jobAddedSignal( self ) :
return self.__jobAddedSignal
def jobRemovedSignal( self ) :
return self.__jobRemovedSignal
def jobFailedSignal( self ) :
return self.__jobFailedSignal
def _append( self, job ) :
assert( isinstance( job, LocalDispatcher.Job ) )
self.__jobs.append( job )
self.jobAddedSignal()( job )
def _remove( self, job, force = False ) :
if job in self.__jobs :
self.__jobs.remove( job )
self.jobRemovedSignal()( job )
if force and job in self.__failedJobs :
self.__failedJobs.remove( job )
def _fail( self, job ) :
if job in self.__jobs and job not in self.__failedJobs :
job._fail()
self.__failedJobs.append( job )
self.jobFailedSignal()( job )
self._remove( job )
__jobPool = JobPool()
@staticmethod
def defaultJobPool() :
return LocalDispatcher.__jobPool
def jobPool( self ) :
return self.__jobPool
def _doDispatch( self, batch ) :
job = LocalDispatcher.Job(
batch = batch,
dispatcher = self,
name = Gaffer.Context.current().substitute( self["jobName"].getValue() ),
jobId = os.path.basename( self.jobDirectory() ),
directory = self.jobDirectory(),
)
self.__jobPool._append( job )
job.execute( background = self["executeInBackground"].getValue() )
@staticmethod
def _doSetupPlugs( parentPlug ) :
if "local" not in parentPlug :
localPlug = Gaffer.CompoundPlug( "local" )
parentPlug.addChild( localPlug )
parentPlug["local"].clearChildren()
foregroundPlug = Gaffer.BoolPlug( "executeInForeground", defaultValue = False )
parentPlug["local"].addChild( foregroundPlug )
IECore.registerRunTimeTyped( LocalDispatcher, typeName = "Gaffer::LocalDispatcher" )
IECore.registerRunTimeTyped( LocalDispatcher.JobPool, typeName = "Gaffer::LocalDispatcher::JobPool" )
Gaffer.Dispatcher.registerDispatcher( "Local", LocalDispatcher, setupPlugsFn = LocalDispatcher._doSetupPlugs )
|
NetworkClientGUI.py
|
import re
import time
import netifaces
from PyQt5 import QtWidgets
from RigParams import RigParams
from OmniRigQTControls import OmniRigQTControls
__author__ = '@sldmk'
import random
from PyQt5.QtGui import QPalette, QColor
import sys
from PyQt5.QtCore import Qt
from threading import Thread, Timer
import socket
import pyaudio
from pyaudio import Stream
from OPUSCodecImpl import OpusCodec
from PyQt5.QtWidgets import QWidget, QApplication, QDesktopWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLabel, \
QComboBox, QGridLayout, QSpinBox, QLineEdit, QCheckBox
class MainWindow(QWidget):
def __init__(self):
super().__init__()
        self.omniRigQTpanel = OmniRigQTControls(True, self.sendCommandToServer)
self.commandForServer = ''
self.initUI()
self.windowCenter()
self.devicesOut = None
self.clientTCPSocket = None
self.populateDeviceList()
self.populateInternalIPAddresses()
self.setInitialGUIParameters()
self.waitCommandsFromServer = False
self.codecBitrate = 12000
audioPlayer.setAverageDataLabel(self.labelEncodedDataCount)
def setInitialGUIParameters(self):
defaultIPAddress = self.getDefaultIPAddress()
self.txtServerAddr.setText(defaultIPAddress)
index = self.comboBoxClientIPAddr.findText(defaultIPAddress, Qt.MatchFixedString)
if index >= 0:
self.comboBoxClientIPAddr.setCurrentIndex(index)
def initUI(self):
self.redColorPalette = QPalette()
self.greenColorPalette = QPalette()
self.redColorPalette.setColor(QPalette.WindowText, QColor("red"))
self.greenColorPalette.setColor(QPalette.WindowText, QColor("green"))
self.setGeometry(0, 0, 430, 250)
self.setMinimumWidth(400)
self.setWindowTitle('Voice Transcoder client v 0.1')
self.startStopBtn = QPushButton("Connect and play", self)
self.startStopBtn.clicked.connect(self.startStopBtnClick)
exitButton = QPushButton("Exit")
exitButton.clicked.connect(self.exitBtnClick)
self.labelServerAddr = QLabel('Server address:')
self.labelServerPort = QLabel('Server port:')
self.txtServerAddr = QLineEdit('127.0.0.1')
self.spinServerPort = QSpinBox()
self.spinServerPort.setMaximum(65535)
self.spinServerPort.setMinimum(1025)
self.spinServerPort.setValue(9518)
self.labelClientIPAddr = QLabel('Select interface:')
self.comboBoxClientIPAddr = QComboBox(self)
self.chkBoxfixedClientPort = QCheckBox('Fixed client UDP port:')
self.chkBoxfixedClientPort.stateChanged.connect(self.chkBoxfixedClientPortClick)
self.spinClientPort = QSpinBox()
self.spinClientPort.setMaximum(65535)
self.spinClientPort.setMinimum(1025)
random.seed()
self.spinClientPort.setValue(random.randint(10000, 65535))
self.spinClientPort.setEnabled(False)
labelEncodedDataTxt = QLabel('Encoded data from server kBytes/sec: ')
self.labelEncodedDataCount = QLabel('0')
self.labelClientStatus = QLabel('Client is stopped')
self.labelClientStatus.setPalette(self.redColorPalette)
self.labelClientStatus.setWordWrap(True)
self.labelOutput = QLabel('Output device: ')
self.comboBoxOutput = QComboBox(self)
label = QLabel()
label.setFrameStyle(QtWidgets.QFrame.HLine | QtWidgets.QFrame.Plain)
label.setLineWidth(1)
grid = QGridLayout()
grid.setSpacing(6)
grid.addWidget(self.labelOutput, 0, 0)
grid.addWidget(self.comboBoxOutput, 0, 1, 1, 4)
grid.addWidget(QLabel(''), 1, 0)
grid.addWidget(self.labelServerAddr, 2, 0, 1, 2)
grid.addWidget(self.txtServerAddr, 2, 2)
grid.addWidget(self.labelServerPort, 3, 0, 1, 2)
grid.addWidget(self.spinServerPort, 3, 2)
grid.addWidget(self.labelClientIPAddr, 4, 0, 1, 2)
grid.addWidget(self.comboBoxClientIPAddr, 4, 2)
grid.addWidget(self.chkBoxfixedClientPort, 5, 0, 1, 2)
grid.addWidget(self.spinClientPort, 5, 2)
grid.addWidget(QLabel(''), 6, 0)
grid.addWidget(labelEncodedDataTxt, 7, 0, 1, 2)
grid.addWidget(self.labelEncodedDataCount, 7, 2)
grid.addWidget(QLabel(''), 8, 0)
grid.addWidget(self.labelClientStatus, 9, 0, 1, 4)
grid.addWidget(label, 10, 0, 1, 6)
grid.addLayout(self.omniRigQTpanel.getGUI(), 11, 0, 1, 6)
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.startStopBtn)
hbox.addWidget(exitButton)
label2 = QLabel()
label2.setFrameStyle(QtWidgets.QFrame.HLine | QtWidgets.QFrame.Plain)
label2.setLineWidth(1)
vbox = QVBoxLayout()
vbox.addLayout(grid)
vbox.addStretch(1)
vbox.addWidget(label2)
vbox.addLayout(hbox)
self.setLayout(vbox)
self.show()
def populateInternalIPAddresses(self):
ifaces = netifaces.interfaces()
for iface in ifaces:
ifaceDetails = netifaces.ifaddresses(iface)
ifaceDetailsDict = ifaceDetails.get(netifaces.AF_INET)
if ifaceDetailsDict is not None:
self.comboBoxClientIPAddr.addItem(ifaceDetailsDict[0]['addr'])
def getDefaultIPAddress(self):
gws = netifaces.gateways()
defgw = gws.get('default')
if defgw is not None:
ifaceDetails = netifaces.ifaddresses(defgw[netifaces.AF_INET][1])
ifaceDetailsDict = ifaceDetails.get(netifaces.AF_INET)
if ifaceDetailsDict is not None:
return ifaceDetailsDict[0]['addr']
def chkBoxfixedClientPortClick(self):
self.spinClientPort.setEnabled(self.chkBoxfixedClientPort.isChecked())
def windowCenter(self):
qtRectangle = self.frameGeometry()
centerPoint = QDesktopWidget().availableGeometry().center()
qtRectangle.moveCenter(centerPoint)
self.move(qtRectangle.topLeft())
def populateDeviceList(self):
self.devicesOut = audioPlayer.getAudioOutputDevices()
for dev in self.devicesOut:
self.comboBoxOutput.addItem(self.devicesOut.get(dev))
index = self.comboBoxOutput.findText(self.devicesOut.get(audioPlayer.getDefaultAudioOutDeviceIndex()), Qt.MatchFixedString)
if index >= 0:
self.comboBoxOutput.setCurrentIndex(index)
def exitBtnClick(self):
if audioPlayer.isActive:
self.stopAudioPlaying(True)
sys.exit(0)
def startStopBtnClick(self):
if audioPlayer.isActive:
self.stopAudioPlaying(True)
else:
self.startAudioPlaying()
self.switchUIControls()
def switchUIControls(self):
self.labelClientIPAddr.setEnabled(not audioPlayer.isActive)
self.comboBoxClientIPAddr.setEnabled(not audioPlayer.isActive)
self.labelOutput.setEnabled(not audioPlayer.isActive)
self.comboBoxOutput.setEnabled(not audioPlayer.isActive)
self.labelServerAddr.setEnabled(not audioPlayer.isActive)
self.txtServerAddr.setEnabled(not audioPlayer.isActive)
self.labelServerPort.setEnabled(not audioPlayer.isActive)
self.spinServerPort.setEnabled(not audioPlayer.isActive)
self.chkBoxfixedClientPort.setEnabled(not audioPlayer.isActive)
self.spinClientPort.setEnabled(not audioPlayer.isActive and self.chkBoxfixedClientPort.isChecked())
def startAudioPlaying(self):
idxDevOut = self.getKeyByValue(self.devicesOut, self.comboBoxOutput.currentText())
self.labelClientStatus.setText("Trying to connect...")
app.processEvents()
if self.connectToServer(self.txtServerAddr.text(), self.spinServerPort.value()) == True:
audioPlayer.startRecv(idxDevOut, self.comboBoxClientIPAddr.currentText(), self.spinClientPort.value())
self.labelClientStatus.setPalette(self.greenColorPalette)
self.startStopBtn.setText('Stop')
def stopAudioPlaying(self, sendFIN):
try:
self.waitCommandsFromServer = False
audioPlayer.stopRecvAndAudio()
if sendFIN == True:
self.clientTCPSocket.send('request=stopstream'.encode())
self.clientTCPSocket.close()
except Exception as err:
print(str(err))
self.startStopBtn.setText('Connect and play')
self.labelClientStatus.setText('Client is stopped')
self.labelClientStatus.setPalette(self.redColorPalette)
self.switchUIControls()
def connectToServer(self, address, port):
try:
# Create a socket connection for connecting to the server:
self.clientTCPSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.clientTCPSocket.connect((address, port))
# send hello to server
initialHello = socket.gethostname() + ('type=hello|version=1.0')
self.clientTCPSocket.send(initialHello.encode())
# receive server first reply
reply = self.clientTCPSocket.recv(1024).decode()
print(reply)
selectedUDPPort = 'udpport=' + str(self.spinClientPort.value())
self.clientTCPSocket.send(selectedUDPPort.encode())
reply = self.clientTCPSocket.recv(1024).decode()
print(reply)
if reply.find("setbitrate", 0, len(reply)) != -1:
bitrate = reply.partition("=")[2]
audioPlayer.setCodecBitrate(bitrate)
self.labelClientStatus.setText('Connected to server: '+address+', offered bitrate: '+bitrate)
self.waitCommandsFromServer = True
Thread(target=self.handleMessagesFromServer, args=()).start()
return True
except socket.error as err:
self.labelClientStatus.setText(err.strerror)
return False
def handleMessagesFromServer(self):
while self.waitCommandsFromServer:
try:
reply = self.clientTCPSocket.recv(1024).decode()
print(reply)
# below we handle other commands from server
if reply.find("stopaudio", 0, len(reply)) != -1:
self.stopAudioPlaying(False)
print("Disconnect command received")
if reply.find("rigsinfo", 0, len(reply)) != -1:
                    m = re.search(r'.*rigsinfo\|f1=(\d+),t1=(.+),m1=(\w+),s1=(.+),f2=(\d+),t2=(.+),m2=(\w+),s2=(.+).*', reply)
self.omniRigInfo = {
'1': RigParams,
'2': RigParams
}
self.rig1ModeText = ''
self.rig2ModeText = ''
self.rig1 = RigParams()
self.rig2 = RigParams()
self.omniRigInfo['1'] = self.rig1
self.omniRigInfo['2'] = self.rig2
self.rig1.setRigStatus(m.group(4))
self.rig1.setRigFreq(m.group(1))
self.rig1.setRigType(m.group(2))
self.rig1.setRigMode(m.group(3))
self.rig2.setRigStatus(m.group(8))
self.rig2.setRigFreq(m.group(5))
self.rig2.setRigType(m.group(6))
self.rig2.setRigMode(m.group(7))
self.omniRigQTpanel.setRigInformation(self.omniRigInfo)
except socket.error as msg:
print(msg)
audioPlayer.stopRecvAndAudio()
self.waitCommandsFromServer = False
pass
print("Handle additional commands finished")
def getKeyByValue(self, searchDict, searchText):
for key, value in searchDict.items():
if value == searchText:
return key
return -1
    def sendCommandToServer(self, command):
if len(command) > 0:
self.clientTCPSocket.send(command.encode())
command = ''
class StreamAudioPlayer():
def __init__(self, codecBitrate):
self.isActive = False
self.audioOut = pyaudio.PyAudio()
self.info = self.audioOut.get_host_api_info_by_index(0)
self.numdevices = self.info.get('deviceCount')
self.idxDevOut = 0
self.frames_per_buffer = 1920
self.dataLst = []
self.labelAverageDataCount = QLabel
        self.streamOut = Stream(self.audioOut, rate=48000, channels=1, format=pyaudio.paInt16, input=False, output=True)
self.streamOut.stop_stream()
self.codec = OpusCodec(channels=1, rate=48000, frame_size=self.frames_per_buffer, bitrate=codecBitrate)
def setCodecBitrate(self, codecBitrate):
self.codec.setBitrate(codecBitrate)
def startRecv(self, devOut, intfAddr, udpPort):
chunk = self.frames_per_buffer
self.isActive = True
self.streamOut = self.audioOut.open(format=pyaudio.paInt16, channels=1,
rate=48000, input=False, output=True,
output_device_index=devOut,
frames_per_buffer=self.frames_per_buffer)
Thread(target=self.udpStream, args=(chunk, intfAddr, udpPort)).start()
Timer(1.0, function=self.calculateAverage).start()
def stopRecvAndAudio(self):
self.isActive = False
self.streamOut.stop_stream()
def calculateAverage(self):
# print(round(sum(self.dataLst) / 1024, 2))
self.labelAverageDataCount.setText(str(round(sum(self.dataLst) / 1024, 2)))
self.dataLst = []
if self.isActive:
Timer(1.0, function=self.calculateAverage).start()
else:
self.labelAverageDataCount.setText('0')
def udpStream(self, chunk, intfAddr, udpPort):
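        # Receive OPUS-encoded packets over UDP, decode them, and write the PCM
        # samples straight to the PyAudio output stream.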
udpReceiveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpReceiveSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
udpReceiveSocket.bind((intfAddr, udpPort))
print("UDP socket binded to local address: " + str(intfAddr))
while self.isActive:
soundData, addr = udpReceiveSocket.recvfrom(chunk)
if len(soundData) > 35:
self.dataLst.append(len(soundData))
opusdecoded_data = self.codec.decode(soundData)
if len(opusdecoded_data) > 100:
self.streamOut.write(opusdecoded_data)
udpReceiveSocket.close()
print("socket closed")
def getDefaultAudioOutDeviceIndex(self):
return self.audioOut.get_default_output_device_info()["index"]
def getAudioOutputDevices(self):
devices = {}
for i in range(0, self.numdevices):
devOut = self.audioOut.get_device_info_by_host_api_device_index(0, i)
if (devOut.get('maxOutputChannels')) > 0:
d = {devOut.get('index'): devOut.get('name')}
devices.update(d)
return devices
def setAverageDataLabel(self, label):
self.labelAverageDataCount = label
if __name__ == '__main__':
app = QApplication(sys.argv)
audioPlayer = StreamAudioPlayer(24000)
ex = MainWindow()
sys.exit(app.exec_())
|
process_name_1.py
|
from multiprocessing import Process, current_process
import time
import os
def fun(val):
process = os.getpid()
parent_process = os.getppid()
name = current_process().name
print('==='*15 + ' < ' + f'{name}' + ' > ' + '==='*15)
print(f'starting fun Process={process} VAL={val}')
print(f'Parent ID={parent_process}')
time.sleep(val)
print(f'finishing fun Process={process} VAL={val}')
print('==='*15)
def main():
p1 = Process(target=fun, args=(3, ))
p1.start()
p1.join()
print(f'Process p is alive: {p1.is_alive()}')
p2 = Process(target=fun, args=(2,))
p2.start()
p2.join()
print(f'Process p is alive: {p2.is_alive()}')
p3 = Process(target=fun, args=(1,))
p3.start()
p3.join()
print(f'Process p is alive: {p3.is_alive()}')
if __name__ == '__main__':
print('starting main')
main()
print('finishing main')
# NOTE:FAQ The os.getpid returns the current process Id,
# while the os.getppid returns the parent's process Id.
# The parent Id is the same, the process Ids are different for each child process.
|
utils.py
|
import aioboto3
import boto3
import random
import yaml
import multiprocessing as mp
from runners.helpers import db
from runners.helpers.dbconfig import ROLE as SA_ROLE
def updated(d=None, *ds, **kwargs):
"""Shallow merges dictionaries together, mutating + returning first arg"""
if d is None:
d = {}
for new_d in ds:
if new_d is not None:
d.update(new_d)
if kwargs:
d.update(kwargs)
return d
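# Illustrative example of updated() (values are hypothetical, not part of the module):
#
#     >>> updated({'a': 1}, {'b': 2}, c=3)
#     {'a': 1, 'b': 2, 'c': 3}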
def qmap_mp(num_threads, f, args):
payloads = mp.JoinableQueue()
procs = []
def add_task(arg):
payloads.put(arg)
def process_task():
while True:
payload = payloads.get()
try:
f(payload, add_task)
finally:
payloads.task_done()
for arg in args:
add_task(arg)
procs = [mp.Process(target=process_task) for _ in range(num_threads)]
for p in procs:
p.start()
payloads.join()
for p in procs:
p.kill()
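# Minimal usage sketch for qmap_mp (illustrative only; `crawl` and `fetch_links`
# are hypothetical and not defined in this module):
#
#     def crawl(url, add_task):
#         for link in fetch_links(url):
#             add_task(link)          # workers may enqueue follow-up tasks
#
#     qmap_mp(4, crawl, ['https://example.com'])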
def sts_assume_role(src_role_arn, dest_role_arn, dest_external_id=None):
session_name = ''.join(random.choice('0123456789ABCDEF') for i in range(16))
src_role = boto3.client('sts').assume_role(
RoleArn=src_role_arn, RoleSessionName=session_name
)
sts_client = boto3.Session(
aws_access_key_id=src_role['Credentials']['AccessKeyId'],
aws_secret_access_key=src_role['Credentials']['SecretAccessKey'],
aws_session_token=src_role['Credentials']['SessionToken'],
).client('sts')
sts_role = (
sts_client.assume_role(
RoleArn=dest_role_arn,
RoleSessionName=session_name,
ExternalId=dest_external_id,
)
if dest_external_id
else sts_client.assume_role(RoleArn=dest_role_arn, RoleSessionName=session_name)
)
return boto3.Session(
aws_access_key_id=sts_role['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_role['Credentials']['SecretAccessKey'],
aws_session_token=sts_role['Credentials']['SessionToken'],
)
async def aio_sts_assume_role(src_role_arn, dest_role_arn, dest_external_id=None):
session_name = ''.join(random.choice('0123456789ABCDEF') for i in range(16))
async with aioboto3.client('sts') as sts:
src_role = await sts.assume_role(
RoleArn=src_role_arn, RoleSessionName=session_name
)
async with aioboto3.Session(
aws_access_key_id=src_role['Credentials']['AccessKeyId'],
aws_secret_access_key=src_role['Credentials']['SecretAccessKey'],
aws_session_token=src_role['Credentials']['SessionToken'],
).client('sts') as sts_client:
sts_role = await (
sts_client.assume_role(
RoleArn=dest_role_arn,
RoleSessionName=session_name,
ExternalId=dest_external_id,
)
if dest_external_id
else sts_client.assume_role(
RoleArn=dest_role_arn, RoleSessionName=session_name
)
)
return aioboto3.Session(
aws_access_key_id=sts_role['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_role['Credentials']['SecretAccessKey'],
aws_session_token=sts_role['Credentials']['SessionToken'],
)
def yaml_dump(**kwargs):
return yaml.dump(kwargs, default_flow_style=False, explicit_start=True)
def bytes_to_str(x):
return x.decode() if type(x) is bytes else x
def create_metadata_table(table, cols, addition):
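    # Create the table if it does not exist, grant the SA role access, and add the
    # `addition` column only when it is not already present.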
db.create_table(table, cols, ifnotexists=True)
db.execute(f"GRANT INSERT, SELECT ON {table} TO ROLE {SA_ROLE}")
table_names = (row['name'] for row in db.fetch(f'desc table {table}'))
if any(name == addition[0].upper() for name in table_names):
return
db.execute(f'ALTER TABLE {table} ADD COLUMN {addition[0]} {addition[1]}')
|
tvhProxy.py
|
from gevent import monkey
monkey.patch_all()
import json
from dotenv import load_dotenv
from ssdp import SSDPServer
from flask import Flask, Response, request, jsonify, abort, render_template
from gevent.pywsgi import WSGIServer
import xml.etree.ElementTree as ElementTree
from datetime import timedelta, datetime, time
import logging
import socket
import threading
from requests.auth import HTTPDigestAuth
import requests
import os
import sched
logging.basicConfig(level=logging.INFO)
load_dotenv(verbose=True)
app = Flask(__name__)
scheduler = sched.scheduler()
logger = logging.getLogger()
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
# URL format: <protocol>://<username>:<password>@<hostname>:<port>, example: https://test:1234@localhost:9981
config = {
'deviceID': os.environ.get('DEVICE_ID') or '12345678',
'bindAddr': os.environ.get('TVH_BINDADDR') or '',
# only used if set (in case of forward-proxy)
'tvhURL': os.environ.get('TVH_URL') or 'http://localhost:9981',
'tvhProxyURL': os.environ.get('TVH_PROXY_URL'),
'tvhProxyHost': os.environ.get('TVH_PROXY_HOST') or host_ip,
'tvhProxyPort': os.environ.get('TVH_PROXY_PORT') or 5004,
'tvhUser': os.environ.get('TVH_USER') or '',
'tvhPassword': os.environ.get('TVH_PASSWORD') or '',
# number of tuners in tvh
'tunerCount': os.environ.get('TVH_TUNER_COUNT') or 6,
'tvhWeight': os.environ.get('TVH_WEIGHT') or 300, # subscription priority
# usually you don't need to edit this
'chunkSize': os.environ.get('TVH_CHUNK_SIZE') or 1024*1024,
# specifiy a stream profile that you want to use for adhoc transcoding in tvh, e.g. mp4
'streamProfile': os.environ.get('TVH_PROFILE') or 'pass'
}
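# The discover/lineup endpoints below present tvheadend as a Silicondust HDHomeRun
# tuner so that DVR clients (such as Plex) can discover and use it.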
discoverData = {
'FriendlyName': 'tvhProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': int(config['tunerCount']),
'FirmwareVersion': '20150826',
'DeviceID': config['deviceID'],
'DeviceAuth': 'test1234',
'BaseURL': '%s' % (config['tvhProxyURL'] or "http://" + config['tvhProxyHost'] + ":" + str(config['tvhProxyPort'])),
'LineupURL': '%s/lineup.json' % (config['tvhProxyURL'] or "http://" + config['tvhProxyHost'] + ":" + str(config['tvhProxyPort']))
}
@app.route('/discover.json')
def discover():
return jsonify(discoverData)
@app.route('/lineup_status.json')
def status():
return jsonify({
'ScanInProgress': 0,
'ScanPossible': 0,
'Source': "Cable",
'SourceList': ['Cable']
})
@app.route('/lineup.json')
def lineup():
lineup = []
for c in _get_channels():
if c['enabled']:
url = '%s/stream/channel/%s?profile=%s&weight=%s' % (
config['tvhURL'], c['uuid'], config['streamProfile'], int(config['tvhWeight']))
lineup.append({'GuideNumber': str(c['number']),
'GuideName': c['name'],
'URL': url
})
return jsonify(lineup)
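# Illustrative lineup.json entry produced above (hypothetical channel values, using the default
# 'pass' profile and weight 300 from the config):
#   {"GuideNumber": "101", "GuideName": "BBC One",
#    "URL": "http://localhost:9981/stream/channel/<uuid>?profile=pass&weight=300"}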
@app.route('/lineup.post', methods=['GET', 'POST'])
def lineup_post():
return ''
@app.route('/')
@app.route('/device.xml')
def device():
return render_template('device.xml', data=discoverData), {'Content-Type': 'application/xml'}
@app.route('/epg.xml')
def epg():
return _get_xmltv(), {'Content-Type': 'application/xml'}
def _get_channels():
url = '%s/api/channel/grid' % config['tvhURL']
params = {
'limit': 999999,
'start': 0
}
try:
r = requests.get(url, params=params, auth=HTTPDigestAuth(
config['tvhUser'], config['tvhPassword']))
return r.json()['entries']
except Exception as e:
        logger.error('An error occurred: %s', repr(e))
def _get_genres():
def _findMainCategory(majorCategories, minorCategory):
prevKey, currentKey = None, None
for currentKey in sorted(majorCategories.keys()):
if(currentKey > minorCategory):
return majorCategories[prevKey]
prevKey = currentKey
return majorCategories[prevKey]
url = '%s/api/epg/content_type/list' % config['tvhURL']
params = {'full': 1}
try:
r = requests.get(url, auth=HTTPDigestAuth(
config['tvhUser'], config['tvhPassword']))
entries = r.json()['entries']
r = requests.get(url, params=params, auth=HTTPDigestAuth(
config['tvhUser'], config['tvhPassword']))
entries_full = r.json()['entries']
majorCategories = {}
genres = {}
for entry in entries:
majorCategories[entry['key']] = entry['val']
for entry in entries_full:
if not entry['key'] in majorCategories:
mainCategory = _findMainCategory(majorCategories, entry['key'])
if(mainCategory != entry['val']):
genres[entry['key']] = [mainCategory, entry['val']]
else:
genres[entry['key']] = [entry['val']]
else:
genres[entry['key']] = [entry['val']]
return genres
except Exception as e:
        logger.error('An error occurred: %s', repr(e))
def _get_xmltv():
try:
url = '%s/xmltv/channels' % config['tvhURL']
r = requests.get(url, auth=HTTPDigestAuth(
config['tvhUser'], config['tvhPassword']))
logger.info('downloading xmltv from %s', r.url)
        tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))
root = tree.getroot()
url = '%s/api/epg/events/grid' % config['tvhURL']
params = {
'limit': 999999,
'filter': json.dumps([
{
"field": "start",
"type": "numeric",
"value": int(round(datetime.timestamp(datetime.now() + timedelta(hours=72)))),
"comparison": "lt"
}
])
}
r = requests.get(url, params=params, auth=HTTPDigestAuth(
config['tvhUser'], config['tvhPassword']))
logger.info('downloading epg grid from %s', r.url)
epg_events_grid = r.json()['entries']
epg_events = {}
event_keys = {}
for epg_event in epg_events_grid:
if epg_event['channelUuid'] not in epg_events:
epg_events[epg_event['channelUuid']] = {}
epg_events[epg_event['channelUuid']
][epg_event['start']] = epg_event
for key in epg_event.keys():
event_keys[key] = True
channelNumberMapping = {}
channelsInEPG = {}
genres = _get_genres()
for child in root:
if child.tag == 'channel':
channelId = child.attrib['id']
channelNo = child[1].text
if not channelNo:
logger.error("No channel number for: %s", channelId)
channelNo = "00"
if not child[0].text:
logger.error("No channel name for: %s", channelNo)
child[0].text = "No Name"
channelNumberMapping[channelId] = channelNo
if channelNo in channelsInEPG:
logger.error("duplicate channelNo: %s", channelNo)
channelsInEPG[channelNo] = False
channelName = ElementTree.Element('display-name')
channelName.text = str(channelNo) + " " + child[0].text
child.insert(0, channelName)
for icon in child.iter('icon'):
                    # check if the icon exists (tvh always returns a URL even if there is no channel icon)
iconUrl = icon.attrib['src']
r = requests.head(iconUrl)
if r.status_code == requests.codes.ok:
icon.attrib['src'] = iconUrl
else:
logger.error("remove icon: %s", iconUrl)
child.remove(icon)
child.attrib['id'] = channelNo
if child.tag == 'programme':
channelUuid = child.attrib['channel']
channelNumber = channelNumberMapping[channelUuid]
channelsInEPG[channelNumber] = True
child.attrib['channel'] = channelNumber
start_datetime = datetime.strptime(
child.attrib['start'], "%Y%m%d%H%M%S %z").astimezone(tz=None).replace(tzinfo=None)
stop_datetime = datetime.strptime(
child.attrib['stop'], "%Y%m%d%H%M%S %z").astimezone(tz=None).replace(tzinfo=None)
if start_datetime >= datetime.now() + timedelta(hours=72):
# Plex doesn't like extremely large XML files, we'll remove the details from entries more than 72h in the future
# Fixed w/ plex server 1.19.2.2673
# for desc in child.iter('desc'):
# child.remove(desc)
pass
elif stop_datetime > datetime.now() and start_datetime < datetime.now() + timedelta(hours=72):
# add extra details for programs in the next 72hs
start_timestamp = int(
round(datetime.timestamp(start_datetime)))
epg_event = epg_events[channelUuid][start_timestamp]
if ('image' in epg_event):
programmeImage = ElementTree.SubElement(child, 'icon')
imageUrl = str(epg_event['image'])
if(imageUrl.startswith('imagecache')):
imageUrl = config['tvhURL'] + \
"/" + imageUrl + ".png"
programmeImage.attrib['src'] = imageUrl
if ('genre' in epg_event):
for genreId in epg_event['genre']:
for category in genres[genreId]:
programmeCategory = ElementTree.SubElement(
child, 'category')
programmeCategory.text = category
if ('episodeOnscreen' in epg_event):
episodeNum = ElementTree.SubElement(
child, 'episode-num')
episodeNum.attrib['system'] = 'onscreen'
episodeNum.text = epg_event['episodeOnscreen']
if('hd' in epg_event):
video = ElementTree.SubElement(child, 'video')
quality = ElementTree.SubElement(video, 'quality')
quality.text = "HDTV"
if('new' in epg_event):
ElementTree.SubElement(child, 'new')
else:
ElementTree.SubElement(child, 'previously-shown')
if('copyright_year' in epg_event):
date = ElementTree.SubElement(child, 'date')
date.text = str(epg_event['copyright_year'])
del epg_events[channelUuid][start_timestamp]
for key in sorted(channelsInEPG):
if channelsInEPG[key]:
logger.debug("Programmes found for channel %s", key)
else:
channelName = root.find(
'channel[@id="'+key+'"]/display-name').text
logger.error("No programme for channel %s: %s",
key, channelName)
# create 2h programmes for 72 hours
yesterday_midnight = datetime.combine(
datetime.today(), time.min) - timedelta(days=1)
date_format = '%Y%m%d%H%M%S'
for x in range(0, 36):
dummyProgramme = ElementTree.SubElement(root, 'programme')
dummyProgramme.attrib['channel'] = str(key)
dummyProgramme.attrib['start'] = (
yesterday_midnight + timedelta(hours=x*2)).strftime(date_format)
dummyProgramme.attrib['stop'] = (
yesterday_midnight + timedelta(hours=(x*2)+2)).strftime(date_format)
dummyTitle = ElementTree.SubElement(
dummyProgramme, 'title')
dummyTitle.attrib['lang'] = 'eng'
dummyTitle.text = channelName
dummyDesc = ElementTree.SubElement(dummyProgramme, 'desc')
dummyDesc.attrib['lang'] = 'eng'
dummyDesc.text = "No programming information"
logger.info("returning epg")
return ElementTree.tostring(root)
    except requests.exceptions.RequestException as e:
        logger.error('An error occurred: %s', repr(e))
def _start_ssdp():
ssdp = SSDPServer()
thread_ssdp = threading.Thread(target=ssdp.run, args=())
thread_ssdp.daemon = True # Daemonize thread
thread_ssdp.start()
ssdp.register('local',
'uuid:{}::upnp:rootdevice'.format(discoverData['DeviceID']),
'upnp:rootdevice',
'http://{}:{}/device.xml'.format(
config['tvhProxyHost'], config['tvhProxyPort']),
'SSDP Server for tvhProxy')
if __name__ == '__main__':
http = WSGIServer((config['bindAddr'], int(config['tvhProxyPort'])),
app.wsgi_app, log=logger, error_log=logger)
_start_ssdp()
http.serve_forever()
|
fb2twilio.py
|
import creds
import threading
from fbchat import Client
from fbchat.models import *
from twilio.rest import Client as TwilioClient
def forwardMsg(msg):
tclient.messages.create(
to=creds.twilio['phone'],
from_=creds.twilio['twilio_phone'],
body=str(msg)
)
class CustomClient(Client):
def onMessage(self, mid, author_id, message_object, thread_id, thread_type, ts, metadata, msg, **kwargs):
        author = list(self.fetchUserInfo(author_id).values())[0]
# print 'new msg from ({:s}):'.format(author.name), msg['delta']['body']
thr = threading.Thread(target=forwardMsg, args=['\n{:s}: '.format(author.name)+ msg['delta']['body']])
thr.start()
# if __name__ == '__main__':
# tclient = TwilioClient(creds.twilio['sid'], creds.twilio['auth'])
# fclient = CustomClient(creds.fb['email'], creds.fb['pwd'])
# fclient.listen()
tclient = TwilioClient(creds.twilio['sid'], creds.twilio['auth'])
fclient = CustomClient(creds.fb['email'], creds.fb['pwd'])
def run():
fclient.listen()
|
windows.py
|
import collections
import ctypes
import ctypes.wintypes
import os
import socket
import struct
import threading
import time
import argparse
import pydivert
import pydivert.consts
import pickle
import socketserver
PROXY_API_PORT = 8085
class Resolver:
def __init__(self):
self.socket = None
self.lock = threading.RLock()
def setup(self):
with self.lock:
TransparentProxy.setup()
self._connect()
def _connect(self):
if self.socket:
self.socket.close()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect(("127.0.0.1", PROXY_API_PORT))
self.wfile = self.socket.makefile('wb')
self.rfile = self.socket.makefile('rb')
pickle.dump(os.getpid(), self.wfile)
def original_addr(self, csock):
client = csock.getpeername()[:2]
with self.lock:
try:
pickle.dump(client, self.wfile)
self.wfile.flush()
addr = pickle.load(self.rfile)
if addr is None:
raise RuntimeError("Cannot resolve original destination.")
addr = list(addr)
addr[0] = str(addr[0])
addr = tuple(addr)
return addr
except (EOFError, socket.error):
self._connect()
return self.original_addr(csock)
class APIRequestHandler(socketserver.StreamRequestHandler):
"""
TransparentProxy API: Returns the pickled server address, port tuple
for each received pickled client address, port tuple.
"""
def handle(self):
proxifier = self.server.proxifier
pid = None
try:
pid = pickle.load(self.rfile)
if pid is not None:
proxifier.trusted_pids.add(pid)
while True:
client = pickle.load(self.rfile)
server = proxifier.client_server_map.get(client, None)
pickle.dump(server, self.wfile)
self.wfile.flush()
except (EOFError, socket.error):
proxifier.trusted_pids.discard(pid)
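# Rough sketch of the exchange APIRequestHandler implements (the Resolver above is the real
# client; the address and port values below are hypothetical):
#
#     s = socket.create_connection(("127.0.0.1", PROXY_API_PORT))
#     wfile, rfile = s.makefile("wb"), s.makefile("rb")
#     pickle.dump(os.getpid(), wfile); wfile.flush()              # register this PID as trusted
#     pickle.dump(("192.168.0.42", 4242), wfile); wfile.flush()   # ask about a client (addr, port)
#     server = pickle.load(rfile)                                 # original (dst_addr, dst_port) or None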
class APIServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, proxifier, *args, **kwargs):
socketserver.TCPServer.__init__(self, *args, **kwargs)
self.proxifier = proxifier
self.daemon_threads = True
# Windows error.h
ERROR_INSUFFICIENT_BUFFER = 0x7A
# http://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx
class MIB_TCPROW2(ctypes.Structure):
_fields_ = [
('dwState', ctypes.wintypes.DWORD),
('dwLocalAddr', ctypes.wintypes.DWORD),
('dwLocalPort', ctypes.wintypes.DWORD),
('dwRemoteAddr', ctypes.wintypes.DWORD),
('dwRemotePort', ctypes.wintypes.DWORD),
('dwOwningPid', ctypes.wintypes.DWORD),
('dwOffloadState', ctypes.wintypes.DWORD)
]
# http://msdn.microsoft.com/en-us/library/windows/desktop/bb485772(v=vs.85).aspx
def MIB_TCPTABLE2(size):
class _MIB_TCPTABLE2(ctypes.Structure):
_fields_ = [('dwNumEntries', ctypes.wintypes.DWORD),
('table', MIB_TCPROW2 * size)]
return _MIB_TCPTABLE2()
class TransparentProxy:
"""
Transparent Windows Proxy for mitmproxy based on WinDivert/PyDivert.
Requires elevated (admin) privileges. Can be started separately by manually running the file.
This module can be used to intercept and redirect all traffic that is forwarded by the user's machine and
traffic sent from the machine itself.
How it works:
    (1) First, we intercept all packets that match our filter (destination port 80 and 443 by default).
We both consider traffic that is forwarded by the OS (WinDivert's NETWORK_FORWARD layer) as well as traffic
sent from the local machine (WinDivert's NETWORK layer). In the case of traffic from the local machine, we need to
    distinguish between traffic sent from applications and traffic sent from the proxy. To accomplish this, we use
Windows' GetTcpTable2 syscall to determine the source application's PID.
    For each intercepted packet, we
1. Store the source -> destination mapping (address and port)
    2. Remove the packet from the network (by not reinjecting it).
    3. Re-inject the packet into the local network stack, but with the destination address changed to the proxy.
(2) Next, the proxy receives the forwarded packet, but does not know the real destination yet (which we overwrote
with the proxy's address). On Linux, we would now call getsockopt(SO_ORIGINAL_DST), but that unfortunately doesn't
work on Windows. However, we still have the correct source information. As a workaround, we now access the forward
module's API (see APIRequestHandler), submit the source information and get the actual destination back (which the
forward module stored in (1.3)).
    (3) The proxy now establishes the upstream connection as usual.
(4) Finally, the proxy sends the response back to the client. To make it work, we need to change the packet's source
    address back to the original destination (using the mapping from (1.3)), which the client believes it is talking
    to.
Limitations:
- No IPv6 support. (Pull Requests welcome)
- TCP ports do not get re-used simultaneously on the client, i.e. the proxy will fail if application X
connects to example.com and example.org from 192.168.0.42:4242 simultaneously. This could be mitigated by
introducing unique "meta-addresses" which mitmproxy sees, but this would remove the correct client info from
mitmproxy.
"""
def __init__(self,
mode="both",
redirect_ports=(80, 443), custom_filter=None,
proxy_addr=False, proxy_port=8080,
api_host="localhost", api_port=PROXY_API_PORT,
cache_size=65536):
"""
:param mode: Redirection operation mode: "forward" to only redirect forwarded packets, "local" to only redirect
packets originating from the local machine, "both" to redirect both.
:param redirect_ports: if the destination port is in this tuple, the requests are redirected to the proxy.
:param custom_filter: specify a custom WinDivert filter to select packets that should be intercepted. Overrides
redirect_ports setting.
:param proxy_addr: IP address of the proxy (IP within a network, 127.0.0.1 does not work). By default,
this is detected automatically.
        :param proxy_port: Port the proxy is listening on.
:param api_host: Host the forward module API is listening on.
:param api_port: Port the forward module API is listening on.
:param cache_size: Maximum number of connection tuples that are stored. Only relevant in very high
load scenarios.
"""
if proxy_port in redirect_ports:
raise ValueError("The proxy port must not be a redirect port.")
if not proxy_addr:
# Auto-Detect local IP.
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
proxy_addr = s.getsockname()[0]
s.close()
self.mode = mode
self.proxy_addr, self.proxy_port = proxy_addr, proxy_port
self.connection_cache_size = cache_size
self.client_server_map = collections.OrderedDict()
self.api = APIServer(self, (api_host, api_port), APIRequestHandler)
self.api_thread = threading.Thread(target=self.api.serve_forever)
self.api_thread.daemon = True
self.request_filter = custom_filter or " or ".join(
("tcp.DstPort == %d" %
p) for p in redirect_ports)
self.request_forward_handle = None # type: pydivert.WinDivert
self.request_forward_thread = threading.Thread(
target=self.request_forward)
self.request_forward_thread.daemon = True
self.addr_pid_map = dict()
self.trusted_pids = set()
self.tcptable2 = MIB_TCPTABLE2(0)
self.tcptable2_size = ctypes.wintypes.DWORD(0)
self.request_local_handle = None # type: pydivert.WinDivert
self.request_local_thread = threading.Thread(target=self.request_local)
self.request_local_thread.daemon = True
# The proxy server responds to the client. To the client,
# this response should look like it has been sent by the real target
self.response_filter = "outbound and tcp.SrcPort == %d" % proxy_port
self.response_handle = None # type: pydivert.WinDivert
self.response_thread = threading.Thread(target=self.response)
self.response_thread.daemon = True
self.icmp_handle = None # type: pydivert.WinDivert
@classmethod
def setup(cls):
# TODO: Make sure that server can be killed cleanly. That's a bit difficult as we don't have access to
# controller.should_exit when this is called.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_unavailable = s.connect_ex(("127.0.0.1", PROXY_API_PORT))
if server_unavailable:
proxifier = TransparentProxy()
proxifier.start()
def start(self):
self.api_thread.start()
# Block all ICMP requests (which are sent on Windows by default).
# In layman's terms: If we don't do this, our proxy machine tells the client that it can directly connect to the
# real gateway if they are on the same network.
self.icmp_handle = pydivert.WinDivert(
filter="icmp",
layer=pydivert.Layer.NETWORK,
flags=pydivert.Flag.DROP
)
self.icmp_handle.open()
self.response_handle = pydivert.WinDivert(
filter=self.response_filter,
layer=pydivert.Layer.NETWORK
)
self.response_handle.open()
self.response_thread.start()
if self.mode == "forward" or self.mode == "both":
self.request_forward_handle = pydivert.WinDivert(
filter=self.request_filter,
layer=pydivert.Layer.NETWORK_FORWARD
)
self.request_forward_handle.open()
self.request_forward_thread.start()
if self.mode == "local" or self.mode == "both":
self.request_local_handle = pydivert.WinDivert(
filter=self.request_filter,
layer=pydivert.Layer.NETWORK
)
self.request_local_handle.open()
self.request_local_thread.start()
def shutdown(self):
if self.mode == "local" or self.mode == "both":
self.request_local_handle.close()
if self.mode == "forward" or self.mode == "both":
self.request_forward_handle.close()
self.response_handle.close()
self.icmp_handle.close()
self.api.shutdown()
def recv(self, handle: pydivert.WinDivert) -> pydivert.Packet:
"""
Convenience function that receives a packet from the passed handler and handles error codes.
        If the process has been shut down, None is returned.
"""
try:
return handle.recv()
except WindowsError as e:
if e.winerror == 995:
return None
else:
raise
def fetch_pids(self):
ret = ctypes.windll.iphlpapi.GetTcpTable2(
ctypes.byref(
self.tcptable2), ctypes.byref(
self.tcptable2_size), 0)
if ret == ERROR_INSUFFICIENT_BUFFER:
self.tcptable2 = MIB_TCPTABLE2(self.tcptable2_size.value)
self.fetch_pids()
elif ret == 0:
for row in self.tcptable2.table[:self.tcptable2.dwNumEntries]:
local = (
socket.inet_ntoa(struct.pack('L', row.dwLocalAddr)),
socket.htons(row.dwLocalPort)
)
self.addr_pid_map[local] = row.dwOwningPid
else:
raise RuntimeError("Unknown GetTcpTable2 return code: %s" % ret)
def request_local(self):
while True:
packet = self.recv(self.request_local_handle)
if not packet:
return
client = (packet.src_addr, packet.src_port)
if client not in self.addr_pid_map:
self.fetch_pids()
# If this fails, we most likely have a connection from an external client to
            # a local server on 80/443. In this case, we always want to proxy
# the request.
pid = self.addr_pid_map.get(client, None)
if pid not in self.trusted_pids:
self._request(packet)
else:
self.request_local_handle.send(packet, recalculate_checksum=False)
def request_forward(self):
"""
        Redirect packets to the proxy
"""
while True:
packet = self.recv(self.request_forward_handle)
if not packet:
return
self._request(packet)
def _request(self, packet: pydivert.Packet):
# print(" * Redirect client -> server to proxy")
# print("%s:%s -> %s:%s" % (packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port))
client = (packet.src_addr, packet.src_port)
server = (packet.dst_addr, packet.dst_port)
if client in self.client_server_map:
self.client_server_map.move_to_end(client)
else:
while len(self.client_server_map) > self.connection_cache_size:
self.client_server_map.popitem(False)
self.client_server_map[client] = server
packet.dst_addr, packet.dst_port = self.proxy_addr, self.proxy_port
packet.direction = pydivert.consts.Direction.INBOUND
        # Use any handle that's on the NETWORK layer - request_local may be
# unavailable.
self.response_handle.send(packet)
def response(self):
"""
        Spoof the source address of packets sent from the proxy to the client
"""
while True:
packet = self.recv(self.response_handle)
if not packet:
return
# If the proxy responds to the client, let the client believe the target server sent the packets.
# print(" * Adjust proxy -> client")
client = (packet.dst_addr, packet.dst_port)
server = self.client_server_map.get(client, None)
if server:
packet.src_addr, packet.src_port = server
packet.recalculate_checksums()
else:
print("Warning: Previously unseen connection from proxy to %s:%s." % client)
self.response_handle.send(packet, recalculate_checksum=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Windows Transparent Proxy"
)
parser.add_argument(
'--mode',
choices=[
'forward',
'local',
'both'],
default="both",
help='redirection operation mode: "forward" to only redirect forwarded packets, '
'"local" to only redirect packets originating from the local machine')
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--redirect-ports",
nargs="+",
type=int,
default=[
80,
443],
metavar="80",
help="ports that should be forwarded to the proxy")
group.add_argument(
"--custom-filter",
default=None,
metavar="WINDIVERT_FILTER",
help="Custom WinDivert interception rule.")
parser.add_argument("--proxy-addr", default=False,
help="Proxy Server Address")
parser.add_argument("--proxy-port", type=int, default=8080,
help="Proxy Server Port")
parser.add_argument("--api-host", default="localhost",
help="API hostname to bind to")
parser.add_argument("--api-port", type=int, default=PROXY_API_PORT,
help="API port")
parser.add_argument("--cache-size", type=int, default=65536,
help="Maximum connection cache size")
options = parser.parse_args()
proxy = TransparentProxy(**vars(options))
proxy.start()
print(" * Transparent proxy active.")
print(" Filter: {0}".format(proxy.request_filter))
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print(" * Shutting down...")
proxy.shutdown()
print(" * Shut down.")
|
HVAC_Controller.py
|
"""
Hi Welcome to the HVAC controller of the....
______ ______ _ _ _ _____ _
| ___ \ | ___ \ (_) | | | / ___| | |
| |_/ / __ _ ___ ___ | |_/ /_ _ _| | __| | \ `--. _ _ ___| |_ ___ _ __ ___
| ___ \/ _` / __|/ _ \ | ___ \ | | | | |/ _` | `--. \ | | / __| __/ _ \ '_ ` _ \
| |_/ / (_| \__ \ __/ | |_/ / |_| | | | (_| | /\__/ / |_| \__ \ || __/ | | | | |
\____/ \__,_|___/\___| \____/ \__,_|_|_|\__,_| \____/ \__, |___/\__\___|_| |_| |_|
__/ |
|___/
For the active building model.
####################################################################################
## This script is designed to control the following sub systems of the AB Model: ##
## - HVAC ##
####################################################################################
Developer Info:
Name: Walter Bassage
Email: w.bassage@sheffield.ac.uk
Code Version: 0.1v
"""
###############################-- Import Libraries --################################
## Libraries used for networking, threading, serial I/O and configuration.          ##
######################################################################################
import os
import sys
import time
import errno
import serial
import socket
import logging
import threading
import configparser
from time import sleep
from queue import Queue
from random import randrange
from co2_monitor import sensros
###############################-- Global Variables --################################
## Global variables and settings shared by the functions below.                     ##
######################################################################################
HEADER_LENGTH = 10 # Variable used when sending messages to server
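# Framing example (illustrative): for the payload b"HVAC" the header sent first is
# b"4         " (the payload length left-aligned in HEADER_LENGTH characters), followed by the payload itself.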
NUMBER_OF_THREADS = 3 # Number of worker threads to use
JOB_NUMBER = [1,2,3] # Number of active jobs at any point in run time
queue = Queue()
# Variables used to create and read the settings file
system_file = "client_settings.ini"
config = configparser.ConfigParser()
subsystem_ID = "HVAC"
##################################-- Network Setup --################################
##                            Check connection status                               ##
##                            Request client restarts                               ##
##                            Record data from clients                              ##
######################################################################################
# This function creates the connection to the server
def connection():
    # Read the settings file and use the stored IP and host
config.read(system_file)
config.sections()
    # Manual update of IP and username
subsystem_ID = "HVAC"
host = '10.0.0.10'
port = 1234
    # Use global so the variable can be used outside the function
global client_socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((host, port))
    # Prevent the receive call from blocking
client_socket.setblocking(False)
username = subsystem_ID.encode('utf-8')
username_header = f"{len(username):<{HEADER_LENGTH}}".encode('utf-8')
client_socket.send(username_header + username)
print("Connection establised with server")
##############################-- Subsystem Functions --##############################
## Functions that read the sensors and keep the HVAC subsystem running.             ##
######################################################################################
def main_sensor():
    no_messages = 0 # Variable used to count failed attempts to receive messages from the Arduino sensors
while True:
        # This loop requests 1, 2, and 3; each corresponds to a function on the Arduino that returns the CO2, temperature or humidity reading
for x in range(3):
num = str(x+1)
message = sensros(num)
time.sleep(0.10)
            # Check whether the received message is blank
if not message:
no_messages = no_messages + 1
print("Not sending messages. Failure request no: " + str(no_messages))
                # If the sensor stops receiving data for 10 rounds, it will shut down and require a restart
if no_messages >= 10:
send_data("ERROR: Sensor has stopped sending data please check connection")
break
else:
time.sleep(0.30)
            # If all is good the received message is sent.
else:
no_messages = 0
send_data(message)
def self_manage():
while True:
try:
sleep(15)
continue
except IOError as e:
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print('Reading error', str(e))
continue
except Exception as e:
print('General error', str(e))
sys.exit()
def security_protocalls():
print("Security not active by default")
###############################-- Client Commands --##################################
##                            Check connection status                               ##
##                            Request client restarts                               ##
##                            Record data from clients                              ##
######################################################################################
def send_data(info):
try:
        message = info
        # Encryption goes here
message = message.encode('utf-8')
message_header = f"{len(message) :< {HEADER_LENGTH}}".encode("utf-8")
client_socket.send(message_header + message)
# sleep(0.5)
except IOError as e:
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print('Reading error', str(e))
error_code = str(e)
if error_code == "[Errno 32] Broken pipe":
                # Protocol that makes the HVAC system manage itself while disconnected from the controller
self_manage()
sys.exit()
except Exception as e:
print('General error', str(e))
sys.exit()
###############################-- Receive Commands --################################
## Function used for receiving commands from the connected server; currently unused ##
######################################################################################
def recive_commands():
while True:
try:
            # Receive messages from the server
username_header = client_socket.recv(HEADER_LENGTH)
if not len(username_header):
print("connction closed by the server")
sys.exit()
username_length = int(username_header.decode("utf-8").strip())
username = client_socket.recv(username_length).decode("utf-8")
message_header = client_socket.recv(HEADER_LENGTH)
message_length = int(message_header.decode("utf-8").strip())
message = client_socket.recv(message_length).decode("utf-8")
            # Decryption goes here (implement when needed)
print(f"{username}> {message}")
        # Catch errors related to the communication process and connection.
except IOError as e:
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print('Reading error', str(e))
error_code = str(e)
if error_code == "[Errno 32] Broken pipe":
self_manage()
sys.exit()
continue
except Exception as e:
print('General error', str(e))
sys.exit()
###########################-- Multithreading Setup --#################################
## Main Setup that will be run on start up of script ##
######################################################################################
# Create worker threads
def create_workers():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
# Do the next job in the queue
# - First thread handles connections
# - Second thread collects readings and sends them to the server (main_sensor)
# - Third thread implements security (security_protocalls)
def work():
while True:
x = queue.get()
if x == 1:
main_sensor()
if x == 2:
security_protocalls()
queue.task_done()
def create_jobs():
for x in JOB_NUMBER:
queue.put(x)
queue.join()
################################-- Main Setup --######################################
## Main Setup that will be run on start up of script ##
######################################################################################
if __name__ == "__main__":
connection()
create_workers()
create_jobs()
|
parasol.py
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
from argparse import ArgumentParser, _ArgumentGroup
from queue import Empty, Queue
from shutil import which
from threading import Thread
from typing import Optional, Union, Dict
from toil.batchSystems.abstractBatchSystem import (BatchSystemSupport,
UpdatedBatchJobInfo)
from toil.common import SYS_MAX_SIZE, Toil
from toil.test import get_temp_file
from toil.lib.iterables import concat
logger = logging.getLogger(__name__)
class ParasolBatchSystem(BatchSystemSupport):
"""
The interface for Parasol.
"""
@classmethod
def supportsWorkerCleanup(cls):
return False
@classmethod
def supportsAutoDeployment(cls):
return False
def __init__(self, config, maxCores, maxMemory, maxDisk):
super().__init__(config, maxCores, maxMemory, maxDisk)
if maxMemory != SYS_MAX_SIZE:
logger.warning('The Parasol batch system does not support maxMemory.')
# Keep the name of the results file for the pstat2 command..
command = config.parasolCommand
if os.path.sep not in command:
            command = which(command)
            if command is None:
                raise RuntimeError("Can't find %s on PATH." % config.parasolCommand)
logger.debug('Using Parasol at %s', command)
self.parasolCommand = command
jobStoreType, path = Toil.parseLocator(config.jobStore)
if jobStoreType != 'file':
raise RuntimeError("The parasol batch system doesn't currently work with any "
"jobStore type except file jobStores.")
self.parasolResultsDir = tempfile.mkdtemp(dir=os.path.abspath(path))
logger.debug("Using parasol results dir: %s", self.parasolResultsDir)
# In Parasol, each results file corresponds to a separate batch, and all jobs in a batch
# have the same cpu and memory requirements. The keys to this dictionary are the (cpu,
# memory) tuples for each batch. A new batch is created whenever a job has a new unique
# combination of cpu and memory requirements.
self.resultsFiles = dict()
self.maxBatches = config.parasolMaxBatches
# Allows the worker process to send back the IDs of jobs that have finished, so the batch
# system can decrease its used cpus counter
self.cpuUsageQueue = Queue()
# Also stores finished job IDs, but is read by getUpdatedJobIDs().
self.updatedJobsQueue = Queue()
# Use this to stop the worker when shutting down
self.running = True
self.worker = Thread(target=self.updatedJobWorker, args=())
self.worker.start()
self.usedCpus = 0
self.jobIDsToCpu = {}
# Set of jobs that have been issued but aren't known to have finished or been killed yet.
# Jobs that end by themselves are removed in getUpdatedJob, and jobs that are killed are
# removed in killBatchJobs.
self.runningJobs = set()
def _runParasol(self, command, autoRetry=True):
"""
Issues a parasol command using popen to capture the output. If the command fails then it
will try pinging parasol until it gets a response. When it gets a response it will
recursively call the issue parasol command, repeating this pattern for a maximum of N
times. The final exit value will reflect this.
"""
command = list(concat(self.parasolCommand, command))
while True:
logger.debug('Running %r', command)
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=-1)
stdout, stderr = process.communicate()
status = process.wait()
for line in stderr.decode('utf-8').split('\n'):
if line: logger.warning(line)
if status == 0:
return 0, stdout.decode('utf-8').split('\n')
message = 'Command %r failed with exit status %i' % (command, status)
if autoRetry:
logger.warning(message)
else:
logger.error(message)
return status, None
            logger.warning('Waiting 10s before trying again')
time.sleep(10)
parasolOutputPattern = re.compile("your job ([0-9]+).*")
def issueBatchJob(self, jobDesc, job_environment: Optional[Dict[str, str]] = None):
"""
Issues parasol with job commands.
"""
self.checkResourceRequest(jobDesc.memory, jobDesc.cores, jobDesc.disk)
MiB = 1 << 20
truncatedMemory = jobDesc.memory // MiB * MiB
# Look for a batch for jobs with these resource requirements, with
# the memory rounded down to the nearest megabyte. Rounding down
        # means the new job can't ever decrease the memory requirements
# of jobs already in the batch.
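        # e.g. a request of 2,500,000 bytes is truncated to 2 MiB = 2,097,152 bytes.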
if len(self.resultsFiles) >= self.maxBatches:
raise RuntimeError('Number of batches reached limit of %i' % self.maxBatches)
try:
results = self.resultsFiles[(truncatedMemory, jobDesc.cores)]
except KeyError:
results = get_temp_file(rootDir=self.parasolResultsDir)
self.resultsFiles[(truncatedMemory, jobDesc.cores)] = results
# Prefix the command with environment overrides, optionally looking them up from the
# current environment if the value is None
command = ' '.join(concat('env', self.__environment(job_environment), jobDesc.command))
parasolCommand = ['-verbose',
'-ram=%i' % jobDesc.memory,
'-cpu=%i' % jobDesc.cores,
'-results=' + results,
'add', 'job', command]
# Deal with the cpus
self.usedCpus += jobDesc.cores
while True: # Process finished results with no wait
try:
jobID = self.cpuUsageQueue.get_nowait()
except Empty:
break
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
while self.usedCpus > self.maxCores: # If we are still waiting
jobID = self.cpuUsageQueue.get()
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
# Now keep going
while True:
line = self._runParasol(parasolCommand)[1][0]
match = self.parasolOutputPattern.match(line)
if match is None:
# This is because parasol add job will return success, even if the job was not
# properly issued!
                logger.debug('We failed to properly add the job, we will try again after 5s.')
time.sleep(5)
else:
jobID = int(match.group(1))
self.jobIDsToCpu[jobID] = jobDesc.cores
self.runningJobs.add(jobID)
logger.debug(f"Got the parasol job id: {jobID} from line: {line}")
return jobID
def setEnv(self, name, value=None):
if value and ' ' in value:
raise ValueError('Parasol does not support spaces in environment variable values.')
return super().setEnv(name, value)
def __environment(self, job_environment: Optional[Dict[str, str]] = None):
environment = self.environment.copy()
if job_environment:
environment.update(job_environment)
return (k + '=' + (os.environ[k] if v is None else v) for k, v in list(environment.items()))
def killBatchJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
if jobID in self.runningJobs:
self.runningJobs.remove(jobID)
exitValue = self._runParasol(['remove', 'job', str(jobID)],
autoRetry=False)[0]
logger.debug("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedBatchJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
break
logger.warning('Tried to kill some jobs, but something happened and they are still '
'going, will try again in 5s.')
time.sleep(5)
# Update the CPU usage, because killed jobs aren't written to the results file.
for jobID in jobIDs:
if jobID in list(self.jobIDsToCpu.keys()):
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
runningPattern = re.compile(r'r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+')
def getJobIDsForResultsFile(self, resultsFile):
"""
Get all queued and running jobs for a results file.
"""
jobIDs = []
for line in self._runParasol(['-extended', 'list', 'jobs'])[1]:
fields = line.strip().split()
if len(fields) == 0 or fields[-1] != resultsFile:
continue
jobID = fields[0]
jobIDs.append(int(jobID))
return set(jobIDs)
def getIssuedBatchJobIDs(self):
"""
Gets the list of jobs issued to parasol in all results files, but not including jobs
created by other users.
"""
issuedJobs = set()
for resultsFile in self.resultsFiles.values():
issuedJobs.update(self.getJobIDsForResultsFile(resultsFile))
return list(issuedJobs)
def getRunningBatchJobIDs(self):
"""
Returns map of running jobIDs and the time they have been running.
"""
# Example lines..
# r 5410186 benedictpaten worker 1247029663 localhost
# r 5410324 benedictpaten worker 1247030076 localhost
runningJobs = {}
issuedJobs = self.getIssuedBatchJobIDs()
for line in self._runParasol(['pstat2'])[1]:
if line != '':
match = self.runningPattern.match(line)
if match is not None:
jobID = int(match.group(1))
startTime = int(match.group(2))
if jobID in issuedJobs: # It's one of our jobs
runningJobs[jobID] = time.time() - startTime
return runningJobs
def getUpdatedBatchJob(self, maxWait):
while True:
try:
item = self.updatedJobsQueue.get(timeout=maxWait)
except Empty:
return None
try:
self.runningJobs.remove(item.jobID)
except KeyError:
# We tried to kill this job, but it ended by itself instead, so skip it.
pass
else:
return item
def updatedJobWorker(self):
"""
We use the parasol results to update the status of jobs, adding them
to the list of updated jobs.
Results have the following structure.. (thanks Mark D!)
int status; /* Job status - wait() return format. 0 is good. */
char *host; /* Machine job ran on. */
char *jobId; /* Job queuing system job ID */
char *exe; /* Job executable file (no path) */
int usrTicks; /* 'User' CPU time in ticks. */
int sysTicks; /* 'System' CPU time in ticks. */
unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
unsigned startTime; /* Job start time in seconds since 1/1/1970 */
unsigned endTime; /* Job end time in seconds since 1/1/1970 */
char *user; /* User who ran job */
char *errFile; /* Location of stderr file on host */
Plus you finally have the command name.
"""
resultsFiles = set()
resultsFileHandles = []
try:
while self.running:
# Look for any new results files that have been created, and open them
newResultsFiles = set(os.listdir(self.parasolResultsDir)).difference(resultsFiles)
for newFile in newResultsFiles:
newFilePath = os.path.join(self.parasolResultsDir, newFile)
resultsFileHandles.append(open(newFilePath))
resultsFiles.add(newFile)
for fileHandle in resultsFileHandles:
while self.running:
line = fileHandle.readline()
if not line:
break
assert line[-1] == '\n'
(status, host, jobId, exe, usrTicks, sysTicks, submitTime, startTime,
endTime, user, errFile, command) = line[:-1].split(None, 11)
status = int(status)
jobId = int(jobId)
if os.WIFEXITED(status):
status = os.WEXITSTATUS(status)
else:
status = -status
self.cpuUsageQueue.put(jobId)
startTime = int(startTime)
endTime = int(endTime)
if endTime == startTime:
                            # Both start and end time are integers, so to get sub-second
# accuracy we use the ticks reported by Parasol as an approximation.
# This isn't documented but what Parasol calls "ticks" is actually a
# hundredth of a second. Parasol does the unit conversion early on
# after a job finished. Search paraNode.c for ticksToHundreths. We
# also cheat a little by always reporting at least one hundredth of a
# second.
usrTicks = int(usrTicks)
sysTicks = int(sysTicks)
wallTime = float(max(1, usrTicks + sysTicks)) * 0.01
else:
wallTime = float(endTime - startTime)
self.updatedJobsQueue.put(UpdatedBatchJobInfo(jobID=jobId, exitStatus=status, wallTime=wallTime, exitReason=None))
time.sleep(1)
except:
logger.warning("Error occurred while parsing parasol results files.")
raise
finally:
for fileHandle in resultsFileHandles:
fileHandle.close()
def shutdown(self):
self.killBatchJobs(self.getIssuedBatchJobIDs()) # cleanup jobs
for results in self.resultsFiles.values():
exitValue = self._runParasol(['-results=' + results, 'clear', 'sick'],
autoRetry=False)[0]
if exitValue is not None:
logger.warning("Could not clear sick status of the parasol batch %s" % results)
exitValue = self._runParasol(['-results=' + results, 'flushResults'],
autoRetry=False)[0]
if exitValue is not None:
logger.warning("Could not flush the parasol batch %s" % results)
self.running = False
logger.debug('Joining worker thread...')
self.worker.join()
logger.debug('... joined worker thread.')
for results in list(self.resultsFiles.values()):
os.remove(results)
os.rmdir(self.parasolResultsDir)
@classmethod
def add_options(cls, parser: Union[ArgumentParser, _ArgumentGroup]) -> None:
parser.add_argument("--parasolCommand", dest="parasolCommand", default='parasol',
help="The name or path of the parasol program. Will be looked up on PATH "
"unless it starts with a slash. (default: %(default)s).")
parser.add_argument("--parasolMaxBatches", dest="parasolMaxBatches", default=1000,
help="Maximum number of job batches the Parasol batch is allowed to create. One batch is "
"created for jobs with a a unique set of resource requirements. (default: %(default)s).")
@classmethod
def setOptions(cls, setOption):
from toil.common import iC
setOption("parasolCommand", None, None, 'parasol')
setOption("parasolMaxBatches", int, iC(1), 10000)
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages. Port number 0 can be
used to get an arbitrary unused port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
https://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
result = _encode(result)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
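# e.g. cram('abcdefghij', 8) returns 'ab...hij' (2 leading + 3 trailing characters kept).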
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
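# e.g. stripid('<foo.Bar instance at 0x00b4ed2c>') returns '<foo.Bar instance>'.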
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
    # will not exist. Fake one that nothing will match, and make
    # the _encode function a no-op.
class _unicode(object):
pass
_encoding = 'ascii'
def _encode(text, encoding='ascii'):
return text
else:
import locale
_encoding = locale.getpreferredencoding()
def _encode(text, encoding=None):
if isinstance(text, unicode):
return text.encode(encoding or _encoding, 'xmlcharrefreplace')
else:
return text
def _binstr(obj):
# Ensure that we have an encoded (binary) string representation of obj,
# even if it is a unicode string.
if isinstance(obj, _unicode):
return obj.encode(_encoding, 'xmlcharrefreplace')
return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = module.__doc__.splitlines()[0] if module.__doc__ else None
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object,
basedir=os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"https://docs.python.org/library")
basedir = os.path.normcase(basedir)
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith(("http://", "https://")):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
else:
docloc = os.path.join(docloc, object.__name__.lower() + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
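# Usage sketch (illustrative only): HTMLRepr produces a size-limited,
# HTML-escaped repr, so arbitrary object reprs can be embedded safely in
# generated pages:
#
#     >>> HTMLRepr().repr('<b>&')
#     "'&lt;b&gt;&amp;'"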
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(_binstr(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(_binstr(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(_binstr(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, kwonlyargs, kwdefaults, varkw, defaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, kwonlyargs, kwdefaults, varkw, defaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
                # XXX lambdas won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible,
                # so removing the parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
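# Usage sketch (illustrative only): rendering a complete standalone page with
# the module-level 'html' instance defined further down -- essentially what
# writedoc() does:
#
#     >>> import pydoc, glob
#     >>> page = pydoc.html.page(pydoc.describe(glob),
#     ...                        pydoc.html.document(glob, 'glob'))
#     >>> open('glob.html', 'w').write(page)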
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
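# Usage sketch (illustrative only): TextRepr caps the size of the reprs that
# end up in text documentation; long values are cram()-ed down to roughly
# maxstring characters with an ellipsis in the middle:
#
#     >>> len(TextRepr().repr('x' * 1000)) <= 110
#     True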
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', _binstr(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', _binstr(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', _binstr(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
                # XXX lambdas won't usually have func_annotations['return']
                # since the syntax doesn't support it, but it is possible,
                # so removing the parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
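# Usage sketch (illustrative only): TextDoc emphasizes headings with classic
# terminal overstriking (character, backspace, character), which pagers such
# as 'less' render as bold and which plain() below strips out again:
#
#     >>> text.bold('NAME')
#     'N\x08NA\x08AM\x08ME\x08E'
#     >>> plain(text.bold('NAME'))
#     'NAME'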
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not hasattr(sys.stdin, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(_encode(text))
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(_encode(text))
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
try:
h = int(os.environ.get('LINES', 0))
except ValueError:
h = 0
if h <= 1:
h = 25
r = inc = h - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
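# Usage sketch (illustrative only): describe() only produces the short phrase
# used in headings and error messages:
#
#     >>> describe(sys)
#     'built-in module sys'
#     >>> describe(len)
#     'built-in function len'
#     >>> describe(dict)
#     'class dict'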
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
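# Usage sketch (illustrative only): locate() imports as much of a dotted path
# as it can via safeimport() and then walks the remaining attributes,
# returning None rather than raising for a bogus path:
#
#     >>> import os
#     >>> locate('os.path.join') is os.path.join
#     True
#     >>> locate('os.path.no_such_name') is None
#     True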
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if object is None:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
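# Usage sketch (illustrative only): render_doc() is the string-producing core
# that doc(), writedoc() and the Helper class below all build on:
#
#     >>> render_doc('glob').splitlines()[0]
#     'Python Library Documentation: module glob'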
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Tauthon %s! This is the online help utility.
Enter the name of any module, keyword, or topic to get help on writing
Tauthon programs and using Tauthon modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*1))
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
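# Usage sketch (illustrative only): this module-level Helper instance is what
# the interactive built-in help() delegates to (site.py wires it up), so
# keyword, topic and module searches can be reached either way:
#
#     >>> help('while')          # keyword docs, paged
#     >>> help('TRUTHVALUE')     # topic docs
#     >>> help('modules spam')   # module synopses mentioning 'spam'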
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
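# Usage sketch (illustrative only; results depend on what is installed, and
# the scan is slow because it walks every importable module): apropos() below
# is a thin wrapper over ModuleScanner, and a custom callback can collect
# matches instead of printing them:
#
#     >>> hits = []
#     >>> ModuleScanner().run(lambda path, name, desc: hits.append(name),
#     ...                     key='http', onerror=lambda name: None)
#     >>> 'httplib' in hits
#     True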
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
self.url = 'http://%s:%d/' % (self.address[0], self.server_port)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
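# Usage sketch (illustrative only): serve() blocks until interrupted
# (Ctrl-C stops it); the optional callback receives the DocServer once it is
# listening, which is how both gui() below and the -p command-line option
# announce the URL:
#
#     >>> serve(7464, callback=lambda server: sys.stdout.write(server.url + '\n'))
#     http://localhost:7464/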
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
def onerror(modname):
pass
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done),
kwargs=dict(onerror=onerror)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
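# Example invocations (a sketch based on the usage text above; the module
# names and port number are illustrative):
#   pydoc time          # text documentation for the time module
#   pydoc -k thread     # search module synopses for the keyword "thread"
#   pydoc -p 1234       # serve HTML documentation over HTTP on port 1234
#   pydoc -g            # pop up the Tkinter GUI defined above
#   pydoc -w json       # write json.html into the current directory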
|
engine.py
|
"""
"""
import logging
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways = {}
self.engines = {}
self.apps = {}
self.exchanges = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any):
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]):
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]):
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self):
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = ""):
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str):
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f" can not find the bottom of the interface :{gateway_name}")
return gateway
def get_engine(self, engine_name: str):
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f" engine is missing :{engine_name}")
return engine
def get_default_setting(self, gateway_name: str):
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self):
"""
Get all names of gateways added to the main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self):
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self):
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str):
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str):
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str):
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str):
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str):
"""
Query bar history data from a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def close(self):
"""
Make sure every gateway and app is closed properly before
program exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
class BaseEngine(ABC):
"""
Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level = SETTINGS["log.level"]
self.logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self):
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self):
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self):
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self):
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event):
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks = {}
self.orders = {}
self.trades = {}
self.positions = {}
self.accounts = {}
self.contracts = {}
self.active_orders = {}
self.add_function()
self.register_event()
def add_function(self):
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event):
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event):
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
# Otherwise, pop inactive order from in dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event):
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event):
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event):
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol):
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid):
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid):
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid):
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid):
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol):
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self):
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self):
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self):
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self):
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self):
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self):
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = ""):
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread = Thread(target=self.run)
self.queue = Queue()
self.active = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = ""):
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = SETTINGS["email.receiver"]
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self):
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self):
""""""
self.active = True
self.thread.start()
def close(self):
""""""
if not self.active:
return
self.active = False
self.thread.join()
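# Minimal wiring sketch (illustrative only; SomeGateway and SomeApp stand in
# for whatever BaseGateway/BaseApp subclasses a project actually provides,
# and the connect settings dict keys are placeholders):
#
#   event_engine = EventEngine()
#   main_engine = MainEngine(event_engine)
#   main_engine.add_gateway(SomeGateway)   # hypothetical gateway class
#   main_engine.add_app(SomeApp)           # hypothetical app class
#   main_engine.connect({"userid": "...", "password": "..."}, "SOME_GATEWAY")
#   main_engine.write_log("started")
#   ...
#   main_engine.close()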
|
loadable_elf_example_test.py
|
import os
import pexpect
import serial
import sys
import threading
import time
try:
import IDF
except ImportError:
test_fw_path = os.getenv('TEST_FW_PATH')
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import Utility
class CustomProcess(object):
def __init__(self, cmd, logfile):
self.f = open(logfile, 'wb')
self.p = pexpect.spawn(cmd, timeout=60, logfile=self.f)
def __enter__(self):
return self
def close(self):
self.p.terminate(force=True)
def __exit__(self, type, value, traceback):
self.close()
self.f.close()
class OCDProcess(CustomProcess):
def __init__(self, proj_path):
cmd = 'openocd -f interface/ftdi/esp32_devkitj_v1.cfg -f board/esp-wroom-32.cfg'
log_file = os.path.join(proj_path, 'openocd.log')
super(OCDProcess, self).__init__(cmd, log_file)
i = self.p.expect_exact(['Info : Listening on port 3333 for gdb connections', 'Error:'])
if i == 0:
Utility.console_log('openocd is listening for gdb connections')
else:
raise RuntimeError('openocd initialization has failed')
def close(self):
try:
self.p.sendcontrol('c')
self.p.expect_exact('shutdown command invoked')
except Exception:
Utility.console_log('openocd needs to be killed', 'O')
super(OCDProcess, self).close()
class GDBProcess(CustomProcess):
def __init__(self, proj_path, elf_path):
cmd = 'xtensa-esp32-elf-gdb -x {} --directory={} {}'.format(os.path.join(proj_path, '.gdbinit.ci'),
os.path.join(proj_path, 'main'),
elf_path)
log_file = os.path.join(proj_path, 'gdb.log')
super(GDBProcess, self).__init__(cmd, log_file)
self.p.sendline('') # it is for "---Type <return> to continue, or q <return> to quit---"
i = self.p.expect_exact(['Thread 1 hit Temporary breakpoint 2, app_main ()',
'Load failed'])
if i == 0:
Utility.console_log('gdb is at breakpoint')
else:
raise RuntimeError('Load failed: probably the ELF file was not built for loading with gdb')
self.p.expect_exact('(gdb)')
def close(self):
try:
self.p.sendline('q')
self.p.expect_exact('Quit anyway? (y or n)')
self.p.sendline('y')
self.p.expect_exact('Ending remote debugging.')
except Exception:
Utility.console_log('gdb needs to be killed', 'O')
super(GDBProcess, self).close()
def break_till_end(self):
self.p.sendline('b esp_restart')
self.p.sendline('c')
self.p.expect_exact('Thread 1 hit Breakpoint 3, esp_restart ()')
class SerialThread(object):
def run(self, log_path, exit_event):
with serial.Serial('/dev/ttyUSB1', 115200) as ser, open(log_path, 'wb') as f:
while True:
f.write(ser.read(ser.in_waiting))
if exit_event.is_set():
break
time.sleep(1)
def __init__(self, log_path):
self.exit_event = threading.Event()
self.t = threading.Thread(target=self.run, args=(log_path, self.exit_event,))
self.t.start()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.exit_event.set()
self.t.join(60)
if self.t.is_alive():
Utility.console_log('The pyserial thread is still alive', 'O')
@IDF.idf_example_test(env_tag="test_jtag_arm")
def test_examples_loadable_elf(env, extra_data):
idf_path = os.environ['IDF_PATH']
rel_project_path = os.path.join('examples', 'get-started', 'hello_world')
proj_path = os.path.join(idf_path, rel_project_path)
elf_path = os.path.join(IDF.Example(rel_project_path).get_binary_path(rel_project_path), 'hello-world.elf')
esp_log_path = os.path.join(proj_path, 'esp.log')
with SerialThread(esp_log_path):
with OCDProcess(proj_path), GDBProcess(proj_path, elf_path) as gdb:
gdb.break_till_end()
if pexpect.run('grep "Restarting now." {}'.format(esp_log_path), withexitstatus=True)[1]:
raise RuntimeError('Expected output from ESP was not received')
if __name__ == '__main__':
test_examples_loadable_elf()
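# Test flow recap (mirrors test_examples_loadable_elf above):
#   1. SerialThread mirrors UART output from /dev/ttyUSB1 into esp.log
#   2. OCDProcess starts openocd and waits until the gdb port is listening
#   3. GDBProcess loads the ELF, stops at app_main(), then runs to esp_restart()
#   4. esp.log is grepped for "Restarting now." to confirm the app actually ran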
|
bench-encoding.py
|
from turbojpeg import TurboJPEG
from nvjpeg import NvJpeg
import cv2
import time
from functools import partial
from threading import Thread
from queue import Queue
import gc
import argparse
class Timer:
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
class CvJpeg(object):
def encode(self, image, quality=90):
# Match the TurboJPEG/NvJpeg encode signature and return the compressed bytes
result, compressed = cv2.imencode('.jpg', image, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
return compressed
class Threaded(object):
def __init__(self, create_jpeg, size=8):
# Image file writers
self.queue = Queue(size)
self.threads = [Thread(target=self.encode_thread, args=())
for _ in range(size)]
self.create_jpeg = create_jpeg
for t in self.threads:
t.start()
def encode_thread(self):
jpeg = self.create_jpeg()
item = self.queue.get()
while item is not None:
image, quality = item
result = jpeg.encode(image, quality)
item = self.queue.get()
def encode(self, image, quality=90):
self.queue.put((image, quality))
def stop(self):
for _ in self.threads:
self.queue.put(None)
for t in self.threads:
t.join()
def bench_threaded(create_encoder, images, threads):
threads = Threaded(create_encoder, threads)
with Timer() as t:
for image in images:
threads.encode(image)
threads.stop()
return len(images) / t.interval
def bench_encoder(create_encoder, images):
encoder = create_encoder()
with Timer() as t:
for image in images:
encoder.encode(image)
return len(images) / t.interval
def main(args):
image = cv2.imread(args.filename, cv2.IMREAD_COLOR)
images = [image] * args.n
num_threads = args.j
print(f'turbojpeg threaded j={num_threads}: {bench_threaded(TurboJPEG, images, num_threads):>5.1f} images/s')
print(f'nvjpeg: {bench_threaded(NvJpeg, images, 1):>5.1f} images/s')
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Jpeg encoding benchmark.')
parser.add_argument('filename', type=str, help='filename of image to use')
parser.add_argument('-j', default=6, type=int, help='run multi-threaded')
parser.add_argument('-n', default=100, type=int, help='number of images to encode')
args = parser.parse_args()
main(args)
gc.collect()
# main(args)
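# Example run (a sketch; the filename and thread/image counts are illustrative):
#   python bench-encoding.py image.jpg -j 8 -n 200
# which prints images/s for the threaded TurboJPEG path and for NvJpeg.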
|
httpclient_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type
from tornado.web import Application, RequestHandler, url
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class ChunkHandler(RequestHandler):
def get(self):
self.write("asdf")
self.flush()
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
], gzip=True)
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes_type)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1)
self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.startswith('Content-Type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent')
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
client.close()
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
try:
yield self.http_client.fetch(self.get_url('/notfound'))
except HTTPError as e:
self.assertEqual(e.code, 404)
self.assertEqual(e.response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
server = HTTPServer(app, io_loop=self.server_ioloop)
server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://localhost:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
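# Minimal synchronous usage outside the test harness (a sketch; the URL is
# illustrative):
#   from tornado.httpclient import HTTPClient
#   client = HTTPClient()
#   response = client.fetch("http://localhost:8888/hello")
#   print(response.body)
#   client.close()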
|
harvest.py
|
from multiprocessing import Process, Manager
import time
import os
import sys
import harvester
# Set Global Variables
URL_SOURCE = 'URI.txt'
if len(sys.argv) > 1:
URL_SOURCE = sys.argv[1]
AUTO_PROCESS_OVERFLOW = True
DATABASE_FILE = 'data/ld-database.db'
DATABASE_TEMPLATE = 'database/create_database.sql'
WORK_QUEUE_OVERFLOW_FILE = 'data/{}_overflow.txt'.format(URL_SOURCE.split('/')[-1])
SCHEMA_INTEGRITY_CHECK = True
CRAWL_RECORD_REPAIR = True
RESPONSE_TIMEOUT = 60
MAX_REDIRECTS = 3
KILL_PROCESSES_TIMEOUT = 600
RECURSION_DEPTH_LIMIT = 3
PROC_COUNT = 16
COMMIT_FREQ = 50
WORK_QUEUE_MAX_SIZE = 1000000
RESP_QUEUE_MAX_SIZE = 1000000
RDF_MEDIA_TYPES = [
"application/rdf+xml",
"text/turtle",
"application/n-triples",
"application/ld+json",
"application/owl+xml",
"text/trig",
"application/n-quads"
]
RDF_FORMATS = [
'rdf',
'owl',
'ttl',
'n3',
'nt',
'json'
]
GLOBAL_HEADER = {
'Accept': ",".join(RDF_MEDIA_TYPES),
'User-Agent': 'LD Link Harvester'
}
BLACKLIST_FORMATS = [
'jpg',
'JPG',
'BMP',
'bmp',
'png',
'PNG',
'jpeg',
'JPEG',
'MP4',
'mp4',
'flv',
'pdf',
'PDF',
'eps',
'EPS',
'svg',
'SVG'
]
# Override harvester module settings with user-selected values
global_vars = globals().copy()
for var in global_vars:
if var in locals() or var in globals():
if var == 'DATABASE_TEMPLATE':
harvester.DATABASE_TEMPLATE = DATABASE_TEMPLATE
elif var == 'WORK_QUEUE_OVERFLOW_FILE':
harvester.WORK_QUEUE_OVERFLOW_FILE = WORK_QUEUE_OVERFLOW_FILE
elif var == 'AUTO_PROCESS_OVERFLOW':
harvester.AUTO_PROCESS_OVERFLOW = AUTO_PROCESS_OVERFLOW
elif var == 'DATABASE_FILE':
harvester.DATABASE_FILE = DATABASE_FILE
elif var == 'SCHEMA_INTEGRITY_CHECK':
harvester.SCHEMA_INTEGRITY_CHECK = SCHEMA_INTEGRITY_CHECK
elif var == 'CRAWL_RECORD_REPAIR':
harvester.CRAWL_RECORD_REPAIR = CRAWL_RECORD_REPAIR
elif var == 'RESPONSE_TIMEOUT':
harvester.RESPONSE_TIMEOUT = RESPONSE_TIMEOUT
elif var == 'KILL_PROCESSES_TIMEOUT':
harvester.KILL_PROCESSES_TIMEOUT = KILL_PROCESSES_TIMEOUT
elif var == 'MAX_REDIRECTS':
harvester.MAX_REDIRECTS = MAX_REDIRECTS
elif var == 'RECURSION_DEPTH_LIMIT':
harvester.RECURSION_DEPTH_LIMIT = RECURSION_DEPTH_LIMIT
elif var == 'PROC_COUNT':
harvester.PROC_COUNT = PROC_COUNT
elif var == 'COMMIT_FREQ':
harvester.COMMIT_FREQ = COMMIT_FREQ
elif var == 'WORK_QUEUE_MAX_SIZE':
harvester.WORK_QUEUE_MAX_SIZE = WORK_QUEUE_MAX_SIZE
elif var == 'RESP_QUEUE_MAX_SIZE':
harvester.RESP_QUEUE_MAX_SIZE = RESP_QUEUE_MAX_SIZE
elif var == 'RDF_MEDIA_TYPES':
harvester.RDF_MEDIA_TYPES = RDF_MEDIA_TYPES
elif var == 'RDF_FORMATS':
harvester.RDF_FORMATS = RDF_FORMATS
elif var == 'GLOBAL_HEADER':
harvester.GLOBAL_HEADER = GLOBAL_HEADER
elif var == 'BLACKLIST_FORMATS':
harvester.BLACKLIST_FORMATS = BLACKLIST_FORMATS
if __name__ == "__main__":
"""
Main runtime script: starts the worker processes and drains the response queue into the database.
"""
URL_BATCH = [(url.strip(), 0, url.strip()) for url in open(URL_SOURCE)]
dbconnector, crawlid = harvester.connect(DATABASE_FILE)
if SCHEMA_INTEGRITY_CHECK:
if harvester.verify_database(dbconnector, DATABASE_TEMPLATE):
print("Database schema integrity has been verified.")
else:
print("Error, database schema does not match the provided template.")
exit(1)
if CRAWL_RECORD_REPAIR:
repairs_required, repairs_made = dbconnector.self_repair_crawl_periods()
if repairs_required != 0:
print("Repairing Crawl records.\nRepairs Required: {}\nRepairs Made: {}".format(repairs_required, repairs_made))
else:
print("No Crawl record repairs are required.")
print("Adding seeds to database.")
dbconnector.insert_seed_bulk(URL_BATCH)
dbconnector.commit()
print("Seeds added to database.")
full_msg = False
manager = Manager()
visited = manager.dict()
work_queue = manager.Queue(maxsize=WORK_QUEUE_MAX_SIZE)
work_queue = harvester.add_bulk_to_work_queue(work_queue, URL_BATCH)
resp_queue = manager.Queue(maxsize=RESP_QUEUE_MAX_SIZE)
begin = time.time()
while True:
worker_procs = []
for i in range(PROC_COUNT):
p = Process(target=harvester.worker_fn, args=(i+1, work_queue, resp_queue, visited))
worker_procs.append(p)
[p.start() for p in worker_procs]
# wait for processes to start
time.sleep(0.1)
threads_started = 0
threads_ended = 0
i = 0
emergency_timeout_start = time.time()
emergency_timeout = False
while True:
if not resp_queue.empty():
emergency_timeout_start = time.time()
#print(resp_queue.qsize())
if i >= COMMIT_FREQ:
dbconnector.commit()
i = -1  # reset the commit counter
i += 1
try:
resp_tuple = resp_queue.get(timeout=KILL_PROCESSES_TIMEOUT)
except:
print("FROZEN. Emergency Timeout: Empty Response Queue.")
break
if resp_tuple == harvester.start_sentinel:
threads_started += 1
continue
elif resp_tuple == harvester.end_sentinel:
threads_ended += 1
if threads_ended == PROC_COUNT:
break
else:
continue
if isinstance(resp_tuple[0], dict):
'''
OPCODES:
0 = Insert Seed (Deprecated)
1 = Insert Failed Seed (Handled by 2)
2 = Insert Link (Failed or otherwise)
3 = Insert RDF Data
'''
opcode = resp_tuple[0]['opcode']
if resp_tuple[0]['url'] == resp_tuple[0]['params']['source']:
dbconnector.insert_crawl_seed(uri=resp_tuple[0]['url'], crawlid=crawlid)
if opcode == 2:
dbconnector.insert_link(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'], content_format=resp_tuple[0]['params']['format'], failed=resp_tuple[0]['params']['failed'])
if resp_tuple[0]['params']['failed'] == 1 and resp_tuple[0]['url'] == resp_tuple[0]['params']['source']:
if isinstance(resp_tuple[1], Exception):
dbconnector.insert_failed_seed(uri=resp_tuple[0]['url'], crawlid=crawlid, code='000')
else:
dbconnector.insert_failed_seed(uri=resp_tuple[0]['url'], crawlid=crawlid, code=resp_tuple[1].status_code)
if opcode == 3:
dbconnector.insert_link(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'],content_format=resp_tuple[0]['params']['format'], failed=0)
dbconnector.insert_valid_rdfuri(uri=resp_tuple[0]['url'], crawlid=crawlid, source=resp_tuple[0]['params']['source'], response_format=resp_tuple[0]['params']['format'])
if isinstance(resp_tuple[1], Exception):
print("{} : {}".format(str(resp_tuple[0]['url']), str(resp_tuple[1])))
else:
print("{} : {}".format(str(resp_tuple[0]['url']), str(resp_tuple[1].status_code)))
if time.time() - emergency_timeout_start > KILL_PROCESSES_TIMEOUT:
print("FROZEN. Emergency Timeout.")
emergency_timeout = True
break
if not emergency_timeout:
[p.join() for p in worker_procs]
else:
[p.terminate() for p in worker_procs]
if not work_queue.empty():
emergency_timeout = False
continue
if time.time() - emergency_timeout_start > KILL_PROCESSES_TIMEOUT:
print("FROZEN. Emergency Timeout.")
emergency_timeout = True
[p.terminate() for p in worker_procs]
break
if not AUTO_PROCESS_OVERFLOW:
break
else:
if os.path.isfile(WORK_QUEUE_OVERFLOW_FILE):
new_urls = [(url.split()[0], int(url.split()[1]), url.split()[2]) for url in open(WORK_QUEUE_OVERFLOW_FILE, 'r')]
open(WORK_QUEUE_OVERFLOW_FILE, 'w').close()
if len(new_urls) > 0:
harvester.add_bulk_to_work_queue(work_queue, new_urls, visited)
continue
else:
break
else:
break
end = time.time()
harvester.close(dbconnector, crawlid)
print("Duration: {} seconds".format(end - begin))
|
main.py
|
import socket
import utils
import threading
HOST = '127.0.0.1' # Server IP
SERVER_PORT = 5000 # Port the server listens on
STREAM_HOST = '127.0.0.1' # Streaming server IP
STREAM_PORT = 5555 # Streaming server port
# def init_logs():
# global logged_users
# logged_users = set()
def logout(user_name):
"""
Logs the user out.
:param user_name:
:return 'SAIR_DA_APP_ACK':
"""
i = 0
for user in utils.logged_users:
if user_name == user[0]:
del utils.logged_users[i]
break
i += 1
return 'SAIR_DA_APP_ACK'
def login(user_name, user_ip):
"""
If the user is found in the database, sends the 'STATUS_DO_USUARIO' message,
reporting the ID, service type and group members.
Otherwise, creates the user in the database and sends 'ENTRAR_NA_APP_ACK' confirming the creation.
:param user_ip:
:param user_name:
:return 'STATUS_DO_USUARIO {user} {bool(premium)}' or 'ENTRAR_NA_APP_ACK':
"""
for user in utils.logged_users:
if user[0] == user_name:
return 'USUARIO_JA_LOGADO'
utils.logged_users.append((user_name, user_ip))
return utils.entrar_na_app(user_name)
def get_user_information(user_name):
"""
Fetches all of the user's information and sends it to the streaming server.
:param user_name:
:return 'USER_INFORMATION {user} {bool(premium)}':
"""
return utils.get_user_information(user_name)
def upgrade(user_name):
return utils.upgrade_user(user_name)
def create_connection(host, port):
"""
Creates a TCP socket and waits for client connections; when a connection
is accepted, spawns a thread for that client to exchange messages.
:param host:
:param port:
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen()
while True:
print("Esperando conexao...")
conn, ip = s.accept()
print('GOT CONNECTION FROM:', ip)
thread = threading.Thread(target=threaded_client, args=(conn,))
thread.start()
def threaded_client(conn):
"""
Waits for messages from the client and calls a different function for each message type.
:param conn:
:return:
"""
client_name = ''
while conn:
data_byte = conn.recv(1024)
data_string = data_byte.decode()
data = data_string.split(" ")
msg = data[0]
if msg == 'GET_USER_INFORMATION':
message = get_user_information(data[1])
elif msg == 'ENTRAR_NA_APP':
message = login(data[1], data[2])
client_name = data[1]
elif msg == 'SAIR_DA_APP':
message = logout(client_name)
elif msg == 'CRIAR_GRUPO':
message = utils.criar_grupo(client_name)
elif msg == 'ADD_USUARIO_GRUPO':
message = utils.add_grupo(client_name, data[1])
elif msg == 'REMOVER_USUARIO_GRUPO':
message = utils.remover_usr_grupo(client_name, data[1])
elif msg == 'VER_GRUPO':
# message = utils.ver_grupo(client_name)
message = utils.get_grupo(client_name)
elif msg == "UPGRADE":
message = upgrade(client_name)
else:
print("Mensagem invalida")
continue
conn.sendall(message.encode())
if msg == 'SAIR_DA_APP':
return
def main():
"""
Creates two threads: one for the connection with the streaming server
and another for client connections.
:return:
"""
utils.init_db_engine()
stream_thread = threading.Thread(target=create_connection, args=(STREAM_HOST, STREAM_PORT))
stream_thread.start()
client_thread = threading.Thread(target=create_connection, args=(HOST, SERVER_PORT))
client_thread.start()
if __name__ == "__main__":
main()
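# Protocol sketch (space-separated text messages, as handled in threaded_client above):
#   client -> "ENTRAR_NA_APP <user_name> <user_ip>"
#   server -> "STATUS_DO_USUARIO ..." / "ENTRAR_NA_APP_ACK" / "USUARIO_JA_LOGADO"
#   client -> "CRIAR_GRUPO" | "ADD_USUARIO_GRUPO <user>" | "REMOVER_USUARIO_GRUPO <user>"
#             | "VER_GRUPO" | "UPGRADE" | "GET_USER_INFORMATION <user>"
#   client -> "SAIR_DA_APP"
#   server -> "SAIR_DA_APP_ACK"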
|
test_radius.py
|
# RADIUS tests
# Copyright (c) 2013-2016, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import subprocess
import threading
import time
import hostapd
from utils import HwsimSkip, require_under_vm, skip_with_fips, fail_test
from test_ap_hs20 import build_dhcp_ack
from test_ap_ft import ft_params1
def connect(dev, ssid, wait_connect=True):
dev.connect(ssid, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=wait_connect)
def test_radius_auth_unreachable(dev, apdev):
"""RADIUS Authentication server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
if int(mib["radiusAuthClientPendingRequests"]) < 1:
raise Exception("Missing pending RADIUS Authentication request")
def test_radius_auth_unreachable2(dev, apdev):
"""RADIUS Authentication server unreachable (2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.17"
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
def test_radius_auth_unreachable3(dev, apdev):
"""RADIUS Authentication server initially unreachable, but then available"""
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.18"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('auth_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
def test_radius_acct_unreachable(dev, apdev):
"""RADIUS Accounting server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 2:
raise Exception("Missing RADIUS Accounting retransmissions")
if int(mib["radiusAccClientPendingRequests"]) < 2:
raise Exception("Missing pending RADIUS Accounting requests")
def test_radius_acct_unreachable2(dev, apdev):
"""RADIUS Accounting server unreachable(2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 1 and int(mib["radiusAccClientPendingRequests"]) < 1:
raise Exception("Missing pending or retransmitted RADIUS Accounting requests")
def test_radius_acct_unreachable3(dev, apdev):
"""RADIUS Accounting server initially unreachable, but then available"""
require_under_vm()
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.18"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('acct_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
time.sleep(1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalResponses'])
req_e = int(as_mib_end['radiusAccServTotalResponses'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_unreachable4(dev, apdev):
"""RADIUS Accounting server unreachable and multiple STAs"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
for i in range(20):
connect(dev[0], "radius-acct")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_radius_acct(dev, apdev):
"""RADIUS Accounting"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_auth_req_attr'] = [ "126:s:Operator", "77:s:testing" ]
params['radius_acct_req_attr'] = [ "126:s:Operator", "77:s:testing" ]
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
dev[2].connect("radius-acct", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
logger.info("Checking for RADIUS counters")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 3:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_non_ascii_ssid(dev, apdev):
"""RADIUS Accounting and non-ASCII SSID"""
params = hostapd.wpa2_eap_params()
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
ssid2 = "740665007374"
params['ssid2'] = ssid2
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid2=ssid2, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef")
def test_radius_acct_pmksa_caching(dev, apdev):
"""RADIUS Accounting with PMKSA caching"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
for d in [ dev[0], dev[1] ]:
d.request("REASSOCIATE")
d.wait_connected(timeout=15, error="Reassociation timed out")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 4:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_interim(dev, apdev):
"""RADIUS Accounting interim update"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS counters")
as_mib_start = as_hapd.get_mib(param="radius_server")
time.sleep(3.1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 3:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable(dev, apdev):
"""RADIUS Accounting interim update with unreachable server"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(3.1)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable2(dev, apdev):
"""RADIUS Accounting interim update with unreachable server (retry)"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
# Use long enough interim update interval to allow RADIUS retransmission
# case (3 seconds) to trigger first.
params['radius_acct_interim_interval'] = "4"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(7.5)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_ipaddr(dev, apdev):
"""RADIUS Accounting and Framed-IP-Address"""
try:
_test_radius_acct_ipaddr(dev, apdev)
finally:
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
stderr=open('/dev/null', 'w'))
subprocess.call(['brctl', 'delbr', 'ap-br0'],
stderr=open('/dev/null', 'w'))
def _test_radius_acct_ipaddr(dev, apdev):
params = { "ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'proxy_arp': '1',
'ap_isolate': '1',
'bridge': 'ap-br0' }
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
try:
hapd.enable()
except:
# For now, do not report failures due to missing kernel support
raise HwsimSkip("Could not start hostapd - assume proxyarp not supported in kernel version")
bssid = apdev[0]['bssid']
subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
addr0 = dev[0].own_addr()
pkt = build_dhcp_ack(dst_ll="ff:ff:ff:ff:ff:ff", src_ll=bssid,
ip_src="192.168.1.1", ip_dst="255.255.255.255",
yiaddr="192.168.1.123", chaddr=addr0)
if "OK" not in hapd.request("DATA_TEST_FRAME ifname=ap-br0 " + binascii.hexlify(pkt)):
raise Exception("DATA_TEST_FRAME failed")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.disable()
def send_and_check_reply(srv, req, code, error_cause=0):
reply = srv.SendPacket(req)
logger.debug("RADIUS response from hostapd")
for i in reply.keys():
logger.debug("%s: %s" % (i, reply[i]))
if reply.code != code:
raise Exception("Unexpected response code")
if error_cause:
if 'Error-Cause' not in reply:
raise Exception("Missing Error-Cause")
if reply['Error-Cause'][0] != error_cause:
raise Exception("Unexpected Error-Cause: {}".format(reply['Error-Cause']))
def test_radius_acct_psk(dev, apdev):
"""RADIUS Accounting - PSK"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", psk="12345678", scan_freq="2412")
def test_radius_acct_psk_sha256(dev, apdev):
"""RADIUS Accounting - PSK SHA256"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412")
def test_radius_acct_ft_psk(dev, apdev):
"""RADIUS Accounting - FT-PSK"""
as_hapd = hostapd.Hostapd("as")
params = ft_params1(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="FT-PSK",
psk="12345678", scan_freq="2412")
def test_radius_acct_ieee8021x(dev, apdev):
"""RADIUS Accounting - IEEE 802.1X"""
skip_with_fips(dev[0])
as_hapd = hostapd.Hostapd("as")
params = hostapd.radius_params()
params["ssid"] = "radius-acct-1x"
params["ieee8021x"] = "1"
params["wep_key_len_broadcast"] = "13"
params["wep_key_len_unicast"] = "13"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct-1x", key_mgmt="IEEE8021X", eap="PSK",
identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
scan_freq="2412")
def test_radius_das_disconnect(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret="secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with incorrect secret")
req = radius_das.DisconnectPacket(dict=dict, secret="incorrect",
User_Name="foo",
NAS_Identifier="localhost",
Event_Timestamp=int(time.time()))
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with incorrect secret properly ignored")
logger.info("Disconnect-Request without Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="psk.user@example.com")
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request without Event-Timestamp properly ignored")
logger.info("Disconnect-Request with non-matching Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="psk.user@example.com",
Event_Timestamp=123456789)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="foo",
User_Password="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 401)
logger.info("Disconnect-Request with invalid Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="foo",
Calling_Station_Id="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 407)
logger.info("Disconnect-Request with mismatching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Calling_Station_Id="12:34:56:78:90:aa",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Session_Id="12345678-87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Multi_Session_Id="12345678+87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Multi_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with no session identification attributes")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with mismatching NAS-IP-Address")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="192.168.3.4",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
logger.info("Disconnect-Request with mismatching NAS-Identifier")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="unknown.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED", "CTRL-EVENT-CONNECTED"])
if ev is None:
raise Exception("Timeout while waiting for re-connection")
if "CTRL-EVENT-EAP-STARTED" not in ev:
raise Exception("Unexpected skipping of EAP authentication in reconnection")
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id and non-matching CUI")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Calling_Station_Id=addr,
Chargeable_User_Identity="foo@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
logger.info("Disconnect-Request with matching CUI")
dev[1].connect("radius-das", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[1].wait_disconnected(timeout=10)
dev[1].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
connect(dev[2], "radius-das")
logger.info("Disconnect-Request with matching User-Name - multiple sessions matching")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=508)
logger.info("Disconnect-Request with User-Name matching multiple sessions, Calling-Station-Id only one")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[2].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id after disassociation")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
logger.info("Disconnect-Request with matching User-Name after disassociation")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
dev[2].request("DISCONNECT")
dev[2].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching CUI after disassociation")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching Calling-Station-Id after disassociation")
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with mismatching Calling-Station-Id after disassociation")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
def test_radius_das_coa(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - CoA"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret="secret", dict=dict)
srv.retries = 1
srv.timeout = 1
# hostapd does not currently support CoA-Request, so NAK is expected
logger.info("CoA-Request with matching Acct-Session-Id")
req = radius_das.CoAPacket(dict=dict, secret="secret",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.CoANAK, error_cause=405)
def test_radius_ipv6(dev, apdev):
"""RADIUS connection over IPv6"""
params = {}
params['ssid'] = 'as'
params['beacon_int'] = '2000'
params['radius_server_clients'] = 'auth_serv/radius_clients_ipv6.conf'
params['radius_server_ipv6'] = '1'
params['radius_server_auth_port'] = '18129'
params['radius_server_acct_port'] = '18139'
params['eap_server'] = '1'
params['eap_user_file'] = 'auth_serv/eap_user.conf'
params['ca_cert'] = 'auth_serv/ca.pem'
params['server_cert'] = 'auth_serv/server.pem'
params['private_key'] = 'auth_serv/server.key'
hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="radius-ipv6")
params['auth_server_addr'] = "::0"
params['auth_server_port'] = "18129"
params['acct_server_addr'] = "::0"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['own_ip_addr'] = "::0"
hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-ipv6")
def test_radius_macacl(dev, apdev):
"""RADIUS MAC ACL"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_acct(dev, apdev):
"""RADIUS MAC ACL and accounting enabled"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected()
dev[1].request("RECONNECT")
def test_radius_failover(dev, apdev):
"""RADIUS Authentication and Accounting server failover"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-failover")
params["auth_server_addr"] = "192.168.213.17"
params["auth_server_port"] = "1812"
params["auth_server_shared_secret"] = "testing"
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "testing"
params['radius_retry_primary_interval'] = "20"
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
hapd.set("auth_server_addr", "127.0.0.1")
hapd.set("auth_server_port", "1812")
hapd.set("auth_server_shared_secret", "radius")
hapd.set('acct_server_addr', "127.0.0.1")
hapd.set('acct_server_port', "1813")
hapd.set('acct_server_shared_secret', "radius")
hapd.enable()
ev = hapd.wait_event(["AP-ENABLED", "AP-DISABLED"], timeout=30)
if ev is None:
raise Exception("AP startup timed out")
if "AP-ENABLED" not in ev:
raise Exception("AP startup failed")
start = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[0].request("SET EAPOL::authPeriod 5")
connect(dev[0], "radius-failover", wait_connect=False)
dev[0].wait_connected(timeout=20)
finally:
dev[0].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
end = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[1].request("SET EAPOL::authPeriod 5")
if end - start < 21:
time.sleep(21 - (end - start))
connect(dev[1], "radius-failover", wait_connect=False)
dev[1].wait_connected(timeout=20)
finally:
dev[1].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
def run_pyrad_server(srv, t_events):
srv.RunWithStop(t_events)
def test_radius_protocol(dev, apdev):
"""RADIUS Authentication protocol tests with a fake server"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
if self.t_events['msg_auth'].is_set():
logger.info("Add Message-Authenticator")
if self.t_events['wrong_secret'].is_set():
logger.info("Use incorrect RADIUS shared secret")
pw = "incorrect"
else:
pw = reply.secret
hmac_obj = hmac.new(pw)
hmac_obj.update(struct.pack("B", reply.code))
hmac_obj.update(struct.pack("B", reply.id))
# reply attributes
reply.AddAttribute("Message-Authenticator",
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
attrs = reply._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(pkt.authenticator)
hmac_obj.update(attrs)
if self.t_events['double_msg_auth'].is_set():
logger.info("Include two Message-Authenticator attributes")
else:
del reply[80]
reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['msg_auth'] = threading.Event()
t_events['wrong_secret'] = threading.Event()
t_events['double_msg_auth'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
params = hostapd.wpa2_eap_params(ssid="radius-test")
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-test", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['msg_auth'].set()
t_events['wrong_secret'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['wrong_secret'].clear()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['double_msg_auth'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
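# The four connection attempts above exercise hostapd's validation of the
# fake server's Access-Accept replies, in order: no Message-Authenticator
# attribute, a Message-Authenticator computed with the wrong shared secret,
# a valid Message-Authenticator, and finally two Message-Authenticator
# attributes in one reply. Each attempt is only allowed to run for about a
# second (wait_connect=False) before the network is removed.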
def test_radius_psk(dev, apdev):
"""WPA2 with PSK from RADIUS"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
a = "\xab\xcd"
secret = reply.secret
if self.t_events['long'].is_set():
p = b'\x10' + "0123456789abcdef" + 15 * b'\x00'
b = hashlib.md5(secret + pkt.authenticator + a).digest()
pp = bytearray(p[0:16])
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
b = hashlib.md5(reply.secret + bytes(cc)).digest()
pp = bytearray(p[16:32])
bb = bytearray(b)
cc += bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
data = '\x00' + a + bytes(cc)
else:
p = b'\x08' + "12345678" + 7 * b'\x00'
b = hashlib.md5(secret + pkt.authenticator + a).digest()
pp = bytearray(p)
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
data = '\x00' + a + bytes(cc)
reply.AddAttribute("Tunnel-Password", data)
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['long'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '2'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk="12345678", scan_freq="2412")
t_events['long'].set()
dev[1].connect(ssid, psk="0123456789abcdef", scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
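# Illustrative sketch (not used by the tests above): the inline Tunnel-Password
# handling in TestServer follows the RFC 2868 obfuscation scheme. A generic
# version, assuming byte-string inputs and the same one-byte length prefix and
# two-byte salt used above, could look like this:
def tunnel_password_encode_sketch(secret, authenticator, salt, password):
    import hashlib
    # Prepend the password length octet and zero-pad to a 16-octet boundary.
    p = bytearray([len(password)]) + bytearray(password)
    while len(p) % 16:
        p += b'\x00'
    # First block is XORed with MD5(secret + Request-Authenticator + salt);
    # each following block with MD5(secret + previous ciphertext block).
    out = bytearray()
    prev = bytearray(authenticator) + bytearray(salt)
    for i in range(0, len(p), 16):
        b = bytearray(hashlib.md5(bytes(bytearray(secret) + prev)).digest())
        block = bytearray(p[i + j] ^ b[j] for j in range(16))
        out += block
        prev = block
    # Attribute value: Tag (0x00 here) + Salt + obfuscated string.
    return b'\x00' + bytes(bytearray(salt)) + bytes(out)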
def test_radius_psk_invalid(dev, apdev):
"""WPA2 with invalid PSK from RADIUS"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
a = "\xab\xcd"
secret = reply.secret
p = b'\x07' + "1234567" + 8 * b'\x00'
b = hashlib.md5(secret + pkt.authenticator + a).digest()
pp = bytearray(p)
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
data = '\x00' + a + bytes(cc)
reply.AddAttribute("Tunnel-Password", data)
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '2'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_auth_force_client_addr(dev, apdev):
"""RADIUS client address specified"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['radius_client_addr'] = "127.0.0.1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth")
def test_radius_auth_force_invalid_client_addr(dev, apdev):
"""RADIUS client address specified and invalid address"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
#params['radius_client_addr'] = "10.11.12.14"
params['radius_client_addr'] = "1::2"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection")
def add_message_auth(req):
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator",
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
attrs = req._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(req.authenticator)
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_server_failures(dev, apdev):
"""RADIUS server failure cases"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
dict = pyrad.dictionary.Dictionary("dictionary.radius")
client = pyrad.client.Client(server="127.0.0.1", authport=1812,
secret="radius", dict=dict)
client.retries = 1
client.timeout = 1
# unexpected State
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
req['State'] = 'foo-state'
add_message_auth(req)
reply = client.SendPacket(req)
if reply.code != pyrad.packet.AccessReject:
raise Exception("Unexpected RADIUS response code " + str(reply.code))
# no EAP-Message
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
add_message_auth(req)
try:
reply = client.SendPacket(req)
raise Exception("Unexpected response")
except pyrad.client.Timeout:
pass
def test_ap_vlan_wpa2_psk_radius_required(dev, apdev):
"""AP VLAN with WPA2-PSK and RADIUS attributes required"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
secret = reply.secret
if self.t_events['extra'].is_set():
reply.AddAttribute("Chargeable-User-Identity", "test-cui")
reply.AddAttribute("User-Name", "test-user")
if self.t_events['long'].is_set():
reply.AddAttribute("Tunnel-Type", 13)
reply.AddAttribute("Tunnel-Medium-Type", 6)
reply.AddAttribute("Tunnel-Private-Group-ID", "1")
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['long'] = threading.Event()
t_events['extra'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['dynamic_vlan'] = "2"
params['wpa_passphrase'] = '0123456789abcdefghi'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
logger.info("connecting without VLAN")
dev[0].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters")
logger.info("connecting without VLAN failed as expected")
logger.info("connecting without VLAN (CUI/User-Name)")
t_events['extra'].set()
dev[1].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[1].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters(2)")
logger.info("connecting without VLAN failed as expected(2)")
t_events['extra'].clear()
t_events['long'].set()
logger.info("connecting with VLAN")
dev[2].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-SSID-TEMP-DISABLED" in ev:
raise Exception("Unexpected failure with vlan parameters")
logger.info("connecting with VLAN succeeded as expected")
finally:
t_events['stop'].set()
t.join()
def test_radius_mppe_failure(dev, apdev):
"""RADIUS failure when adding MPPE keys"""
params = { "ssid": "as", "beacon_int": "2000",
"radius_server_clients": "auth_serv/radius_clients.conf",
"radius_server_auth_port": '18127',
"eap_server": "1",
"eap_user_file": "auth_serv/eap_user.conf",
"ca_cert": "auth_serv/ca.pem",
"server_cert": "auth_serv/server.pem",
"private_key": "auth_serv/server.key" }
authsrv = hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
params['auth_server_port'] = "18127"
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(authsrv, 1, "os_get_random;radius_msg_add_mppe_keys"):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="TTLS",
identity="user", anonymous_identity="ttls",
password="password",
ca_cert="auth_serv/ca.pem", phase2="autheap=GTC",
wait_connect=False, scan_freq="2412")
dev[0].wait_disconnected()
dev[0].request("REMOVE_NETWORK all")
|
qt.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import threading
from threading import Thread
import re
from decimal import Decimal
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from electrum.gui.qt.util import *
from electrum.gui.qt.qrcodewidget import QRCodeWidget
from electrum.gui.qt.amountedit import AmountEdit
from electrum.gui.qt.main_window import StatusBarButton
from electrum.i18n import _
from electrum.plugin import hook
from electrum.util import PrintError, is_valid_email
from .trustedcoin import TrustedCoinPlugin, server
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, PrintError):
def __init__(self, plugin, window):
super().__init__()
self.plugin = plugin
self.window = window
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.plugin.wallet_class):
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.print_error("twofactor: xpub3 not needed")
return
window = self.window.top_level_window()
auth_code = self.plugin.auth_dialog(window)
try:
wallet.on_otp(tx, auth_code)
except:
on_failure(sys.exc_info())
return
on_success(tx)
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(QIcon(":icons/trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
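# auth_dialog() returns the entered one-time code (as an integer, given
# is_int=True on the AmountEdit) or None if the dialog was cancelled;
# HandlerTwoFactor.prompt_user_for_otp above passes the result straight to
# wallet.on_otp() and reports any exception via on_failure.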
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog(self, window, on_finished=None):
task = partial(self.request_billing_info, window.wallet)
return WaitingDialog(window, 'Getting billing information...', task,
on_finished)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.start_request_thread(wallet)
window.show_error(_('Requesting account info from TrustedCoin server...') + '\n' +
_('Please try again.'))
return True
return False
def settings_dialog(self, window):
self.waiting_dialog(window, partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(":icons/trustedcoin-status.png"))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay(self.config)
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def on_buy(self, window, k, v, d):
d.close()
if window.pluginsdialog:
window.pluginsdialog.close()
wallet = window.wallet
uri = "bitcore:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
wallet.is_billing = True
window.pay_to_URI(uri)
window.payto_e.setFrozen(True)
window.message_e.setFrozen(True)
window.amount_e.setFrozen(True)
def go_online_dialog(self, wizard):
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.stack = []
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
import traceback
traceback.print_exc(file=sys.stderr)
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + str(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = Thread(target=request_TOS)
t.setDaemon(True)
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
rabbit_sub_throughput.py
|
import os
from os.path import dirname
import sys
sys.path.append((dirname(sys.path[0])))
from arguments import argparser
import time
import datetime
import pika
from multiprocessing import Process
def pub(n_sec, topic):
start = time.time_ns()
cnt = 0
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue=topic)  # declare the queue (created if it does not exist)
n_ns = n_sec * 1000000000
def pub_():
channel.basic_publish(exchange='', routing_key=topic, body=str(time.time()),
properties=pika.BasicProperties(timestamp=int(time.time())))
while True:
pub_()
if time.time_ns() > start + n_ns:
break
cnt += 1
print(f"pub throughput {cnt / n_sec} msgs")
def sub(n_sec, topic):
start = time.time_ns()
sub_cnt = 0
n_ns = n_sec * 1000000000
def callback(ch, method, properties, body):
nonlocal sub_cnt
sub_cnt += 1
if time.time_ns() > start + n_ns:
channel.stop_consuming()
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue=topic)  # declare the queue (created if it does not exist)
channel.basic_consume(queue=topic, auto_ack=True, on_message_callback=callback)
# print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
print(f"sub throughput {sub_cnt / n_sec}")
if __name__ == '__main__':
args = argparser()
sub_proc = Process(target=sub, args = [args.n_seconds,"hello"])
sub_proc.start()
pub_proc = Process(target=pub, args = [args.n_seconds, "hello"])
pub_proc.start()
procs = [sub_proc, pub_proc]
for proc in procs:
proc.join()
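# Example invocation (assuming the local arguments.argparser() exposes an
# n_seconds option) against a local RabbitMQ broker; the script prints the
# publish and consume throughput measured over that window:
#   python rabbit_sub_throughput.py --n_seconds 10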
|
scrape_polo_feather.py
|
# core
import os
import sys
import time
from datetime import datetime, timedelta
from threading import Thread
import traceback
# installed
# if running from the code/ folder, this will try to import
# a module Poloniex from the folder. Better to run from within the
# poloniex folder as a result
from poloniex import Poloniex
import pandas as pd
import feather as ft
def get_home_dir(repo='crypto_predict'):
cwd = os.path.realpath(__file__) # gets location of this file
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == repo]
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
HOME_DIR = get_home_dir()
key = os.environ.get('polo_key')
sec = os.environ.get('polo_sec')
polo = Poloniex(key, sec)
def make_data_dirs():
"""
Checks if data directory exists, if not, creates it.
"""
exchanges = ['poloniex', 'bittrex']
folds = ['order_books', 'trade_history', '550h_trade_history']
data_folders = ['data/' + f + '/' for f in folds]
dds = ['data'] + data_folders + \
[d + e for d in data_folders for e in exchanges]
dirs = [HOME_DIR + d for d in dds]
for d in dirs:
if not os.path.exists(d):
os.mkdir(d)
make_data_dirs()
TRADE_DATA_DIR = HOME_DIR + 'data/trade_history/poloniex/'
SM_TRADE_DATA_DIR = HOME_DIR + 'data/550h_trade_history/poloniex/'
CSV_WRITE_CHUNK = 500000 # chunksize for writing csv...doesn't seem to make a difference
def get_all_orderbooks():
"""
returns dicts of pandas dataframes with all currency pair orderbooks,
full depth
"""
# returns dict with currencyPair as primary keys, then 'asks', 'bids'
# 'isFrozen', 'seq' - seq is the sequence number for the push api
orderbooks = polo.returnOrderBook(currencyPair='all', depth=1000000)
timestamp = pd.to_datetime(datetime.now())
sell_dfs = {}
buy_dfs = {}
sell_headers = ['price', 'amt']
buy_headers = ['price', 'amt']
for c in orderbooks:
sell_dfs[c] = pd.DataFrame(orderbooks[c]['asks'],
columns=sell_headers)
buy_dfs[c] = pd.DataFrame(orderbooks[c]['bids'],
columns=buy_headers)
sell_dfs[c]['timestamp'] = timestamp
buy_dfs[c]['timestamp'] = timestamp
sell_dfs[c].set_index('timestamp', inplace=True)
buy_dfs[c].set_index('timestamp', inplace=True)
return buy_dfs, sell_dfs
def save_orderbooks(buy_dfs, sell_dfs):
"""
Saves all orderbooks at a given time
"""
for c in buy_dfs.keys():
save_orderbook(buy_dfs[c], sell_dfs[c], c)
def save_orderbook(buy_df, sell_df, market):
"""
Saves one orderbook, in separate buy and sell files
"""
datapath = HOME_DIR + 'data/order_books/poloniex/'
buy_file = datapath + 'buy_orders_' + market + '.csv.gz'
sell_file = datapath + 'sell_orders_' + market + '.csv.gz'
print('saving', market)
if os.path.exists(buy_file):
buy_df.to_csv(buy_file, compression='gzip', mode='a', header=False, chunksize=CSV_WRITE_CHUNK)
sell_df.to_csv(sell_file, compression='gzip', mode='a', header=False, chunksize=CSV_WRITE_CHUNK)
else:
buy_df.to_csv(buy_file, compression='gzip', chunksize=CSV_WRITE_CHUNK)
sell_df.to_csv(sell_file, compression='gzip', chunksize=CSV_WRITE_CHUNK)
def save_all_order_books():
print('retrieving orderbooks...')
buy_dfs, sell_dfs = get_all_orderbooks()
print('done.')
save_orderbooks(buy_dfs, sell_dfs)
def continuously_save_order_books(interval=600):
"""
Saves all order books every 'interval' seconds.
Poloniex allows 6 calls/second before your IP is banned.
"""
def keep_saving():
while True:
try:
save_all_order_books()
except:
traceback.print_exc()
time.sleep(interval)
thread = Thread(target=keep_saving)
thread.start()
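# Example: continuously_save_order_books(interval=600) snapshots every
# market's full order book once every 10 minutes on a background thread;
# each snapshot is a single returnOrderBook call for all pairs, well under
# the 6 calls/second limit mentioned above.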
def recover_file(datafile, chunksize=1000000):
"""
If EOFError comes up, reads as much of the dataframe as possible.
This will happen if the computer is shut down while writing data.
"""
full_df = None
skip = 0
while True:
try:
if skip == 0:
cur_df = pd.read_csv(datafile, chunksize=chunksize, index_col='date', parse_dates=['date'])
else:
cur_df = pd.read_csv(datafile, chunksize=chunksize, index_col='date', parse_dates=['date'], skiprows=range(1, skip))
for c in cur_df:
if full_df is None:
full_df = c
else:
full_df = full_df.append(c)
except EOFError: # eventually we will hit this when we get to the corrupted part
if full_df is not None:
skip = full_df.shape[0]
if chunksize == 1:
return full_df
chunksize = chunksize // 2
if chunksize == 0:
chunksize = 1
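# recover_file() works by remembering how many rows were read successfully
# (skip) each time the gzip stream raises EOFError, then retrying the
# remaining tail with half the previous chunksize; once chunksize has shrunk
# to 1, the next EOFError returns everything recovered so far.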
def convert_earliest_to_latest(market):
"""
takes a trade-history file ordered earliest-to-latest and flips it so the newest rows are at the top
doing this may not actually be desirable, because restoring the earliest data from a corrupted file then becomes trickier
"""
datafile = TRADE_DATA_DIR + market + '.csv.gz'
try:
old_df = pd.read_csv(datafile, index_col='date', parse_dates=['date'])
except EOFError:
print('corrupted file, restoring from backup...')
old_df = recover_file(datafile)
first_date = old_df.index[0]
last_date = old_df.index[-1]
if last_date > first_date: # it is from oldest to newest
df = old_df.iloc[::-1].copy()
df.to_csv(datafile, compression='gzip', chunksize=CSV_WRITE_CHUNK)
else:
print('file is already newest to oldest!')
def remove_dupes(market='BTC_AMP'):
"""
drops duplicate rows from the saved trade-history feather file for the given market and rewrites it sorted by index
"""
ft_datafile = TRADE_DATA_DIR + market + '.ft'
old_df = ft.read_dataframe(ft_datafile)
dd_df = old_df.drop_duplicates()
num_dupes = old_df.shape[0] - dd_df.shape[0]
if num_dupes == 0:
print('no dupes, skipping...')
return
while num_dupes > 0:  # a single drop_duplicates() call sometimes did not actually remove all dupes
print('dropping', num_dupes, 'dupes')
dd_sh1 = dd_df.shape[0]
dd_df = dd_df.drop_duplicates()
num_dupes = dd_sh1 - dd_df.shape[0]
for i in range(10):
print(num_dupes, 'dupes')
dd_sh1 = dd_df.shape[0]
dd_df = dd_df.drop_duplicates()
num_dupes = dd_sh1 - dd_df.shape[0]
if num_dupes == 0:
break
dd_df = dd_df.drop_duplicates() # one more time to be extra sure
dd_df.sort_index(inplace=True)
ft.write_dataframe(dd_df, ft_datafile)
def remove_all_dupes():
ticks = polo.returnTicker()
pairs = sorted(ticks.keys())
for c in pairs:
print('cleaning', c)
remove_dupes(c)
def check_for_dupes(market='BTC_AMP'):
datafile = TRADE_DATA_DIR + market + '.ft'
old_df = ft.read_dataframe(datafile)
dd_df = old_df.drop_duplicates()
print(old_df.shape[0] - dd_df.shape[0], 'dupes')
def make_last_550_h_df(market):
"""
the prediction algorithm currently uses 480 hourly points to predict 24 h into the future,
and the first 24 points are usually thrown away, so the minimum window is 480 + 24 + 24 = 528 hours;
550 is used to leave some margin
"""
datafile = TRADE_DATA_DIR + market + '.ft'
sm_datafile = SM_TRADE_DATA_DIR + market + '.ft'
full_df = ft.read_dataframe(datafile)
latest_ts = full_df['date'].max()
past_ts = latest_ts - timedelta(hours=550)
mask = (full_df['date'] > past_ts) & (full_df['date'] <= latest_ts)
small_df = full_df.loc[mask]
ft.write_dataframe(small_df, sm_datafile)
def make_all_last_550_h_dfs():
"""
for initially making the 550h feather files if they don't exist; otherwise they are
updated by the scrape in get_trade_history
"""
ticks = polo.returnTicker()
pairs = sorted(ticks.keys())
for c in pairs:
print('making df for', c)
make_last_550_h_df(c)
def update_550_h_df(full_df, market):
sm_datafile = SM_TRADE_DATA_DIR + market + '.ft'
sm_df = ft.read_dataframe(sm_datafile)
latest_ts = full_df['date'].max()
past_ts = latest_ts - timedelta(hours=550)
mask = (full_df['date'] > past_ts) & (full_df['date'] <= latest_ts)
small_df = full_df.loc[mask]
ft.write_dataframe(small_df, sm_datafile)
def convert_csv_feather(market='BTC_AMP'):
datafile = TRADE_DATA_DIR + market + '.csv.gz'
ft_datafile = TRADE_DATA_DIR + market + '.ft'
old_df = pd.read_csv(datafile, index_col='date', parse_dates=['date'])
old_df.reset_index(inplace=True)
ft.write_dataframe(old_df, ft_datafile)
def convert_all_to_feather():
ticks = polo.returnTicker()
pairs = sorted(ticks.keys())
for c in pairs:
print('converting to feather:', c)
convert_csv_feather(market=c)
def convert_ft_hdf5(market='BTC_AMP'):
ft_datafile = TRADE_DATA_DIR + market + '.ft'
hdf_datafile = TRADE_DATA_DIR + market + '.hdf5'
old_df = ft.read_dataframe(ft_datafile)
old_df.to_hdf(hdf_datafile, 'data', mode='w', complib='blosc', complevel=9, format='table')
def convert_all_to_hdf5():
ticks = polo.returnTicker()
pairs = sorted(ticks.keys())
for c in pairs:
print('converting to hdf5:', c)
convert_ft_hdf5(market=c)
def get_trade_history(market='BTC_AMP', two_h_delay=False, latest=None):
"""
:param two_h_delay: if a 2 hour delay should be enacted between scrapings
:param latest: pandas series with latest trade datapoint in csv
"""
# first check the latest date on data already there
datafile = TRADE_DATA_DIR + market + '.ft'
latest_ts = None
old_df = None
if os.path.exists(datafile):
# right now the csvs are saved as earliest data in the top
old_df = ft.read_dataframe(datafile)
latest_ts = old_df.iloc[-1]['date'].value / 10**9
# get current timestamp in UTC...tradehist method takes utc times
d = datetime.utcnow()
epoch = datetime(1970, 1, 1)
cur_ts = (d - epoch).total_seconds()
if two_h_delay and (cur_ts - latest_ts) < 7200:
print('scraped within last 2 hours, not scraping again...')
return None, None, None
else:
print('scraping updates')
update = True
else:
print('scraping new, no file exists')
update = False
# get current timestamp in UTC...tradehist method takes utc times
d = datetime.utcnow()
epoch = datetime(1970, 1, 1)
cur_ts = (d - epoch).total_seconds()
# get past time, subtract 4 weeks
past = cur_ts - 60*60*24*7*4
h = polo.marketTradeHist(currencyPair=market, start=past, end=cur_ts)
full_df = pd.io.json.json_normalize(h)
full_df['date'] = pd.to_datetime(full_df['date'])
# very_earliest keeps track of the last date in the saved df on disk
if latest_ts is None:
very_earliest = 0
else:
very_earliest = latest_ts
earliest = 0
cur_earliest = full_df.iloc[-1]['date'].value / 10**9
# if we get to the start of the data, quit, or if the earliest currently
# scraped date is less than the earliest in the saved df on disk, break
# the loop
while cur_earliest != earliest and cur_earliest > very_earliest:
earliest = cur_earliest
past = earliest - 60*60*24*7*4 # subtract 4 weeks
print('scraping another time...')
start = time.time()
h = polo.marketTradeHist(currencyPair=market, start=past, end=earliest)
elapsed = time.time() - start
# max api calls are 6/sec, don't want to get banned...
if elapsed < 1/6.:
print('scraping too fast, sleeping...')
time.sleep(1/5. - elapsed)
df = pd.io.json.json_normalize(h)
df['date'] = pd.to_datetime(df['date'])
full_df = full_df.append(df)
cur_earliest = df.iloc[-1]['date'].value / 10**9
# find where we should cutoff new data
full_df.sort_values(by='tradeID', inplace=True)
full_df.reset_index(inplace=True, drop=True)
if latest is not None:
latest_idx = full_df[full_df['globalTradeID'] == latest['globalTradeID']].index[0]
# take everything from the next trade on
full_df = full_df.iloc[latest_idx + 1:]
if full_df.shape[0] > 0:
# sometimes some duplicates
full_df.drop_duplicates(inplace=True)
# sorted from oldest at the top to newest at bottom for now
for col in ['amount', 'rate', 'total']:
full_df[col] = pd.to_numeric(full_df[col])
update_550_h_df(full_df, market)
return full_df, update, old_df
else:
return None, None, None
def save_trade_history(df, market, update, old_df=None):
"""
Saves a dataframe of the trade history for a market.
"""
filename = TRADE_DATA_DIR + market + '.ft'
if update:
full_df = old_df.append(df)
full_df.drop_duplicates(inplace=True)
full_df.sort_values(by='date', inplace=True)
ft.write_dataframe(full_df, filename)
else:
ft.write_dataframe(df, filename)
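# A minimal usage sketch (assuming the Poloniex client `polo` and the data
# directories are configured as above): fetch any new trades for one market
# and persist them.
#
#     df, update, old_df = get_trade_history('BTC_ETH', two_h_delay=True)
#     if df is not None:
#         save_trade_history(df, 'BTC_ETH', update, old_df)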
def save_all_trade_history(two_h_delay=False):
lat_scr_file = '/'.join(TRADE_DATA_DIR.split('/')[:-2] + ['']) + 'latest_polo_scrape_dates.csv'
lat_scr_df = None
if os.path.exists(lat_scr_file):
try:
lat_scr_df = pd.read_csv(lat_scr_file, index_col='market', parse_dates=['date'])
        except (EOFError, pd.errors.EmptyDataError):  # empty or unreadable file
lat_scr_df = None
ticks = polo.returnTicker()
pairs = sorted(ticks.keys())
for c in pairs:
print('checking', c)
if lat_scr_df is None:
df, update, old_df = get_trade_history(c, two_h_delay=two_h_delay)
else:
if c in lat_scr_df.index:
df, update, old_df = get_trade_history(c, two_h_delay=two_h_delay, latest=lat_scr_df.loc[c])
else:
df, update, old_df = get_trade_history(c, two_h_delay=two_h_delay)
if df is not None:
print('saving', c)
save_trade_history(df, c, update, old_df)
# update the latest scrape date
if lat_scr_df is None:
lat_scr_df = df.iloc[-1:].copy()
lat_scr_df['market'] = c
lat_scr_df.set_index('market', inplace=True)
lat_scr_df['tradeID'] = lat_scr_df['tradeID'].astype('int')
lat_scr_df['globalTradeID'] = lat_scr_df['globalTradeID'].astype('int')
lat_scr_df.to_csv(lat_scr_file, chunksize=CSV_WRITE_CHUNK)
            else:
                # identical handling whether or not `c` is already in the index:
                # .loc updates an existing row or appends a new one
                temp_df = df.iloc[-1:].copy()
                for col in lat_scr_df.columns:
                    lat_scr_df.loc[c, col] = temp_df.iloc[-1][col]
                lat_scr_df['tradeID'] = lat_scr_df['tradeID'].astype('int')
                lat_scr_df['globalTradeID'] = lat_scr_df['globalTradeID'].astype('int')
                lat_scr_df.to_csv(lat_scr_file, chunksize=CSV_WRITE_CHUNK)
print('done!')
def continuously_save_trade_history(interval=600):
"""
    Saves all trade histories every 'interval' seconds.
Poloniex allows 6 calls/second before your IP is banned.
"""
def keep_saving():
while True:
try:
save_all_trade_history()
            except Exception:  # keep the loop alive on scrape errors
traceback.print_exc()
time.sleep(interval)
thread = Thread(target=keep_saving)
thread.start()
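# Usage sketch: start the background scraper from a long-running session. The
# thread it starts is non-daemonic, so the process stays alive until stopped
# explicitly.
#
#     continuously_save_trade_history(interval=600)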
def get_all_loans():
"""
"""
pass
def get_loans(m='BTC_ETH'):
"""
"""
pass
# TODO: get all trade history
# get all market depth and subscribe to updates, on major change (buy/sell)
# use marketTradeHist
# notify telegram bot etc
if __name__ == "__main__":
pass
# updates all trade histories
#save_all_trade_history()
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.tpu import tensor_tracer
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import error_handling
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
  - 1st TPU execution: iterations_per_loop = 4
  - 2nd TPU execution: iterations_per_loop = 4
  - 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
  # Estimator's evaluate() increments the eval step by 1 by default, so here we
  # add the remaining (iterations_per_loop - 1) difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
works for train() and evaluate(). The Tensors returned by the function is
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True,
rendezvous=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._feed_error = None
self._finished = False
self._should_initialize_tpu = True
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._init_ops = []
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
start = time.time()
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
logging.info('Initialized TPU in %d seconds', time.time() - start)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
self._feed_error = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
    logging.info('Stop outfeed thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, rendezvous=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with the
  following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for the prediction model. But it makes
    # TPUInfeedOutfeedSessionHook print a nice message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
          # All the replicas share replica 0's stopping signal.
          # This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
          # broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.
  TPUEstimator allows various different structures for inputs (namely `features`
  and `labels`). Both `features` and `labels` can be any nested structure
  supported by TF nest (namely, dict, tuples, namedtuples or any nested
  structure of such of Tensors). `labels` could be `None` as well.
  These are flattened before they are passed to the infeed/outfeed library
  as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is dtypes and types. So, any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
    Performs some sanity checks to log user-friendly information. Ideally we
    would error out to give users a better error message, but if
    _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions.')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
  This makes calling model_fn on CPU and TPU easier and more consistent, and
  performs the necessary checks and mutations required by TPU training and
  evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input should be taken from the TPU infeed rather
    than from the input pipeline (input_fn) directly. To fit the TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
representing the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
tracing_ops = []
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(), loss,
self._ctx.num_replicas)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]+tracing_ops):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
representing the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
predict_fn representing the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
            'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn, which is
executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
# It is assumed evaluation always happens on a single-host TPU system, so
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
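# For example (illustrative numbers only): with batch_size=1024 and
# 2.5 global steps/sec, this reports 2560 examples/sec.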
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
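For example (assuming an 8-core TPU and the default input pipeline
configuration), `train_batch_size=1024` results in `params['batch_size'] == 128`
inside `model_fn`.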
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
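For example, a minimal CPU-only setup for a quick functionality test might look
like the following sketch, where `my_model_fn` and `my_input_fn` are assumed to
be user-defined functions that accept a `params` argument:
```
est = tf.contrib.tpu.TPUEstimator(
    model_fn=my_model_fn,
    config=tf.contrib.tpu.RunConfig(),
    use_tpu=False,
    train_batch_size=64,
    eval_batch_size=64)
est.train(my_input_fn, steps=10)
```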
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If
not, please call `session.run(tpu.initialize_system())`.
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = (
export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval (see below); predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU besides the one on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent LoggingTensorHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu)
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
'TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; '
'got {}.'.format(mode))
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables))
if self._export_to_tpu:
input_receiver_fn_map = {
_REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
# See b/110052256 for why `check_variables` is `False`.
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=False,
mode=mode,
export_tags=export_tags,
check_variables=False))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
tensors_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
tensors = nest.flatten(tensors_dict)
tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)]
# We cannot return anything other than `tpu_tensors` here so we capture
# the rest for later use.
capture.capture((estimator_spec, tensors_dict, tensors))
return tpu_tensors
tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)
estimator_spec, tensors_dict, tensors = capture.get()
# Reconstruct `tensors`, but with `tpu_tensors` replaced with
# `tpu_tensors_on_cpu`.
new_tensors = []
for t in tensors:
if _is_tpu_tensor(t):
new_tensors.append(tpu_tensors_on_cpu.pop(0))
elif t is None:
new_tensors.append(None)
else:
# Only fetching `tpu_tensors_on_cpu` does not trigger
# TPU computation and blocks, so we add the control dependency here.
control_inputs = (
tpu_tensors_on_cpu if _is_iterable(tpu_tensors_on_cpu) else
(tpu_tensors_on_cpu,))
with ops.control_dependencies(control_inputs):
new_tensors.append(array_ops.identity(t))
# Reconstruct `tensors_dict`.
new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)
# Reconstruct `export_outputs`.
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_tensors_dict))
return estimator_spec._replace(export_outputs=new_export_outputs)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user to have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
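# Sketch (for orientation only): the callable returned above is later received
# by `_augment_model_fn`'s inner `_model_fn` as `features` and handed to the
# input pipeline, roughly:
#   input_fn = features                                   # deferred input_fn
#   input_holders = _InputPipeline(input_fn, batch_axis, ctx)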
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_savedmodel()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if self._log_every_n_steps is not None:
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
output_dir=self.model_dir,
every_n_steps=self._log_every_n_steps)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if self._log_every_n_steps is not None:
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'shutdown_worker')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(timeout_ms=60 * 1000),
]
elif shutdown_mode == 'shutdown_computation':
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60 * 1000),
]
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
),
InstallSignalHandlerHook()
])
if self._log_every_n_steps is not None:
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.append(
training.LoggingTensorHook({
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency))
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold, eval_hooks = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After the TPU evaluation computation is done (the mean_loss tensor), we
# read all variables back from the TPU and update the eval step
# counter properly.
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` to have an update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies so that
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode]),
] + input_hooks
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(dummy_predict_op, host_calls,
scaffold, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via generator) to the call site. So, the
# outfeed-based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed-based Tensors must be grouped with the predictions Tensors
# to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = [] # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode]),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _is_tpu_tensor(tensor):
if not isinstance(tensor, ops.Tensor):
return False
try:
tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) # pylint: disable=protected-access
except ValueError:
return True
else:
return False
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold, captured_eval_hooks.get()
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold, captured_training_hooks.get()
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold, captured_predict_hooks.get()
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can capture only once. Please file bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = self._dataset.make_initializable_iterator()
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is now a dictionary and `features`, `labels`, and `signals` are three
distinct keys in that dict. This provides a better structure, which
eases the process of decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
      # For the non-Tensor case, this is used in a SessionRunHook, so we
      # cannot modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
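# Hedged illustrative sketch (not part of the original module): how the
# stopping signals above are typically built and checked. Nothing here runs at
# import time; the helper below is only a usage illustration.
def _example_stop_signals_usage(batch_size=8):
  """Builds stop/continue signal dicts and evaluates `should_stop` on both."""
  stop_signals = _StopSignals(stop=True, batch_size=batch_size).as_dict()
  go_signals = _StopSignals(stop=False, batch_size=batch_size).as_dict()
  # Each dict carries a [batch_size, 1] bool tensor under the key 'stopping'.
  scalar_stop = _StopSignals.as_scalar_stopping_signal(stop_signals)
  scalar_go = _StopSignals.as_scalar_stopping_signal(go_signals)
  # With Tensors, `should_stop` stays in-graph (logical_and); with a plain
  # Python bool (e.g. inside a SessionRunHook), it falls back to `bool(...)`.
  return _StopSignals.should_stop(scalar_stop), _StopSignals.should_stop(scalar_go)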
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
    # As we split the Tensors across all TPU cores and concat them back, it is
    # important to ensure the real data is placed before the padded rows,
    # i.e., order is preserved. Given that, the sliced padding mask should
    # contain all 0's. If this assertion failed, the slice logic here would
    # not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
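# Hedged illustrative sketch (not part of the original module): padding a
# partial batch up to a fixed batch size and slicing the padded rows back off.
# Shapes and the use of a features dict are assumptions for illustration only.
def _example_padding_roundtrip(batch_size=4):
  features = {'x': array_ops.ones([3, 2], dtype=dtypes.float32)}
  padding_mask, padded_features, _ = _PaddingSignals.pad_features_and_labels(
      features, None, batch_size)
  signals = _StopSignals(
      stop=False, batch_size=batch_size, padding_mask=padding_mask).as_dict()
  # Slicing with the padding mask recovers only the 3 real rows again.
  return _PaddingSignals.slice_tensor_or_dict(padded_features, signals)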
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
        'consider using Tensors with smaller shapes or reducing the batch '
        'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
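# Hedged illustrative sketch (not part of the original module): the helper
# above transparently handles both plain dicts and `hparam.HParams` objects.
def _example_add_item_to_params():
  params_dict = {'batch_size': 32}
  _add_item_to_params(params_dict, 'use_tpu', True)      # plain key assignment
  params_hparams = hparam.HParams(batch_size=32)
  _add_item_to_params(params_hparams, 'use_tpu', True)   # add_hparam (new key)
  _add_item_to_params(params_hparams, 'batch_size', 64)  # set_hparam (existing)
  return params_dict, params_hparams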
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path,
strip_default_attrs)
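# Hedged illustrative sketch (not part of the original module): a typical call
# to `export_estimator_savedmodel`. `my_estimator`, the export directory, and
# `my_serving_input_receiver_fn` are hypothetical placeholders.
def _example_export(my_estimator, my_serving_input_receiver_fn):
  return export_estimator_savedmodel(
      estimator=my_estimator,
      export_dir_base='/tmp/tpu_export',
      serving_input_receiver_fn=my_serving_input_receiver_fn,
      as_text=False)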
|
site_list.py
|
#! /usr/bin/env python3
"""Snoop: скрипт для обновления БД списка сайтов
"""
import json
import sys
import requests
import threading
import xml.etree.ElementTree as ET
from datetime import datetime
from argparse import ArgumentParser, RawDescriptionHelpFormatter
pool = list()
pool1 = list()
def get_rank(domain_to_query, dest):
result = -1
#Retrieve ranking data via alexa API
url = f"http://data.alexa.com/data?cli=10&url={domain_to_query}"
xml_data = requests.get(url).text
root = ET.fromstring(xml_data)
try:
#Get ranking for this site.
dest['rank'] = int(root.find(".//REACH").attrib["RANK"])
except:
#We did not find the rank for some reason.
print(f"Error retrieving rank information for '{domain_to_query}'")
print(f" Returned XML is |{xml_data}|")
return
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("--rank","-r",
action="store_true", dest="rank", default=False,
help="Update all website ranks (not recommended)."
)
args = parser.parse_args()
with open("bad_data.json", "r", encoding="utf8") as bad_file:
data1 = json.load(bad_file)
with open("bad_site.md", "w") as bad_site:
data_length1 = len(data1)
    bad_site.write(f'## Snoop database of unsupported sites (list), {data_length1} site(s) in total!\n')
for social_network_bad in data1:
url_main_bad = data1.get(social_network_bad).get("urlMain")
data1.get(social_network_bad)["rank"] = 0
if args.rank:
            th1 = threading.Thread(target=get_rank, args=(url_main_bad, data1.get(social_network_bad)))
else:
th1 = None
pool.append((social_network_bad, url_main_bad, th1))
if args.rank:
th1.start()
index0 = 1
for social_network_bad, url_main_bad, th1 in pool:
if args.rank:
th1.join()
bad_site.write(f'{index0}. [{social_network_bad}]({url_main_bad})\n')
sys.stdout.write("\r{0}".format(f"Обновлено, всего — {data_length1} сайта(ов) в чёрном списке"))
sys.stdout.flush()
index0 = index0 + 1
if args.rank:
bad_site.write(f'\nAlexa.com rank data fetched at ({datetime.utcnow()} UTC)\n')
sorted_json_data_bad = json.dumps(data1, indent=2, sort_keys=True)
with open("bad_data.json", "w") as bad_file:
bad_file.write(sorted_json_data_bad)
with open("data.json", "r", encoding="utf8") as data_file:
data = json.load(data_file)
with open("sites.md", "w") as site_file:
data_length = len(data)
    site_file.write(f'## Snoop database of supported sites (list), {data_length} site(s) in total!\n')
for social_network in data:
url_main = data.get(social_network).get("urlMain")
data.get(social_network)["rank"] = 0
if args.rank:
th = threading.Thread(target=get_rank, args=(url_main, data.get(social_network)))
else:
th = None
pool1.append((social_network, url_main, th))
if args.rank:
th.start()
index = 1
for social_network, url_main, th in pool1:
if args.rank:
th.join()
site_file.write(f'{index}. [{social_network}]({url_main})\n')
sys.stdout.write("\r{0}".format(f"Обновлено, всего — {data_length} поддерживаемых сайта(ов)"))
sys.stdout.flush()
index = index + 1
if args.rank:
site_file.write(f'\nAlexa.com rank data fetched at ({datetime.utcnow()} UTC)\n')
sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
with open("data.json", "w") as data_file:
data_file.write(sorted_json_data)
print("\n" "Обновлено, всего —" ,data_length1, "сайта(ов) в чёрном списке")
print("\nБД сайтов (.json) упорядочена по алфавиту.")
|
io.py
|
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import logging
import os
import queue
import threading
from itertools import islice
import cudf
import cupy as cp
import numpy as np
import rmm
from cudf._lib.nvtx import annotate
from cudf.io.parquet import ParquetWriter
LOG = logging.getLogger("nvtabular")
#
# Helper Function definitions
#
def _allowable_batch_size(gpu_memory_frac, row_size):
free_mem = rmm.get_info().free
gpu_memory = free_mem * gpu_memory_frac
return max(int(gpu_memory / row_size), 1)
def _get_read_engine(engine, file_path, **kwargs):
LOG.debug("opening '%s' as %s", file_path, engine)
if engine is None:
engine = file_path.split(".")[-1]
if not isinstance(engine, str):
raise TypeError("Expecting engine as string type.")
if engine == "csv":
return CSVFileReader(file_path, **kwargs)
elif engine == "parquet":
return PQFileReader(file_path, **kwargs)
else:
raise ValueError("Unrecognized read engine.")
#
# GPUFileReader Base Class
#
class GPUFileReader:
def __init__(
self, file_path, gpu_memory_frac, batch_size, row_size=None, columns=None, **kwargs
):
""" GPUFileReader Constructor
"""
self.file_path = file_path
self.row_size = row_size
self.columns = columns
        self.initialize_reader(gpu_memory_frac, batch_size, **kwargs)
    def initialize_reader(self, **kwargs):
""" Define necessary file statistics and properties for reader
"""
raise NotImplementedError()
def __iter__(self):
""" Iterates through the file, yielding a series of cudf.DataFrame objects
"""
raise NotImplementedError()
def __len__(self):
""" Returns the number of dataframe chunks in the file """
raise NotImplementedError()
@property
def estimated_row_size(self):
return self.row_size
#
# GPUFileReader Sub Classes (Parquet and CSV Engines)
#
class PQFileReader(GPUFileReader):
    def initialize_reader(self, gpu_memory_frac, batch_size, **kwargs):
self.reader = cudf.read_parquet
# Read Parquet-file metadata
(self.num_rows, self.num_row_groups, columns,) = cudf.io.read_parquet_metadata(
self.file_path
)
        # Use first row-group metadata to estimate memory-reqs
# NOTE: We could also use parquet metadata here, but
# `total_uncompressed_size` for each column is
# not representative of dataframe size for
# strings/categoricals (parquet only stores uniques)
self.row_size = self.row_size or 0
if self.num_rows > 0 and self.row_size == 0:
for col in self.reader(self.file_path, num_rows=1)._columns:
# removed logic for max in first x rows, it was
# causing infinite loops for our customers on their datasets.
self.row_size += col.dtype.itemsize
# Check if we are using row groups
self.use_row_groups = kwargs.get("use_row_groups", None)
self.row_group_batch = 1
self.next_row_group = 0
# Determine batch size if needed
if batch_size and not self.use_row_groups:
self.batch_size = batch_size
self.use_row_groups = False
else:
# Use row size to calculate "allowable" batch size
gpu_memory_batch = _allowable_batch_size(gpu_memory_frac, self.row_size)
self.batch_size = min(gpu_memory_batch, self.num_rows)
# Use row-groups if they meet memory constraints
rg_size = int(self.num_rows / self.num_row_groups)
if (self.use_row_groups is None) and (rg_size <= gpu_memory_batch):
self.use_row_groups = True
elif self.use_row_groups is None:
self.use_row_groups = False
# Determine row-groups per batch
if self.use_row_groups:
self.row_group_batch = max(int(gpu_memory_batch / rg_size), 1)
def __len__(self):
return int((self.num_rows + self.batch_size - 1) // self.batch_size)
def __iter__(self):
for nskip in range(0, self.num_rows, self.batch_size):
# not using row groups because concat uses up double memory
# making iterator unable to use selected gpu memory fraction.
batch = min(self.batch_size, self.num_rows - nskip)
LOG.debug(
"loading chunk from %s, (skip_rows=%s, num_rows=%s)", self.file_path, nskip, batch
)
gdf = self.reader(
self.file_path, num_rows=batch, skip_rows=nskip, engine="cudf", columns=self.columns
)
gdf.reset_index(drop=True, inplace=True)
yield gdf
gdf = None
class CSVFileReader(GPUFileReader):
    def initialize_reader(self, gpu_memory_frac, batch_size, **kwargs):
self.reader = cudf.read_csv
# Count rows and determine column names
estimate_row_size = False
if self.row_size is None:
self.row_size = 0
estimate_row_size = True
self.offset = 0
self.file_bytes = os.stat(str(self.file_path)).st_size
# Use first row to estimate memory-reqs
names = kwargs.get("names", None)
dtype = kwargs.get("dtype", None)
# default csv delim is ","
sep = kwargs.get("sep", ",")
self.sep = sep
self.names = []
dtype_inf = {}
nrows = 10
head = "".join(islice(open(self.file_path), nrows))
snippet = self.reader(
io.StringIO(head), nrows=nrows, names=names, dtype=dtype, sep=sep, header=0
)
self.inferred_names = not self.names
if self.file_bytes > 0:
for i, col in enumerate(snippet.columns):
if names:
name = names[i]
else:
name = col
self.names.append(name)
for i, col in enumerate(snippet._columns):
if estimate_row_size:
self.row_size += col.dtype.itemsize
dtype_inf[self.names[i]] = col.dtype
self.dtype = dtype or dtype_inf
# Determine batch size if needed
if batch_size:
self.batch_size = batch_size * self.row_size
else:
free_mem = rmm.get_info().free
self.batch_size = free_mem * gpu_memory_frac
self.num_chunks = int((self.file_bytes + self.batch_size - 1) // self.batch_size)
def __len__(self):
return self.num_chunks
def __iter__(self):
for chunks in range(self.num_chunks):
LOG.debug(
"loading chunk from %s, byte_range=%s",
self.file_path,
(chunks * self.batch_size, self.batch_size),
)
chunk = self.reader(
self.file_path,
byte_range=(chunks * self.batch_size, self.batch_size),
names=self.names,
header=0 if chunks == 0 and self.inferred_names else None,
sep=self.sep,
)
if self.columns:
for col in self.columns:
chunk[col] = chunk[col].astype(self.dtype[col])
chunk = chunk[self.columns]
yield chunk
chunk = None
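# Hedged illustrative sketch (not part of the original module): `_get_read_engine`
# falls back to the file extension when `engine` is None. The path below is a
# hypothetical placeholder.
def _example_get_read_engine():
    reader = _get_read_engine(None, "/data/day_0.parquet",
                              gpu_memory_frac=0.2, batch_size=None)
    assert isinstance(reader, PQFileReader)
    return reader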
#
# GPUFileIterator (Single File Iterator)
#
class GPUFileIterator:
def __init__(
self,
file_path,
engine=None,
gpu_memory_frac=0.5,
batch_size=None,
columns=None,
use_row_groups=None,
dtypes=None,
names=None,
row_size=None,
**kwargs,
):
self.file_path = file_path
self.engine = _get_read_engine(
engine,
file_path,
columns=columns,
batch_size=batch_size,
gpu_memory_frac=gpu_memory_frac,
use_row_groups=use_row_groups,
dtypes=dtypes,
names=names,
row_size=None,
**kwargs,
)
self.dtypes = dtypes
self.columns = columns
def __iter__(self):
for chunk in self.engine:
if self.dtypes:
self._set_dtypes(chunk)
yield chunk
chunk = None
def __len__(self):
return len(self.engine)
    def _set_dtypes(self, chunk):
for col, dtype in self.dtypes.items():
if type(dtype) is str:
if "hex" in dtype:
chunk[col] = chunk[col]._column.nvstrings.htoi()
chunk[col] = chunk[col].astype(np.int32)
else:
chunk[col] = chunk[col].astype(dtype)
#
# GPUDatasetIterator (Iterates through multiple files)
#
class GPUDatasetIterator:
"""
Iterates through the files and returns a part of the
data as a GPU dataframe
Parameters
-----------
paths : list of str
Path(s) of the data file(s)
names : list of str
names of the columns in the dataset
engine : str
supported file types are: 'parquet' or 'csv'
gpu_memory_frac : float
fraction of the GPU memory to fill
batch_size : int
number of samples in each batch
    columns : list of str
        subset of columns to load (all columns when None)
    use_row_groups : bool
        whether to read parquet data one row-group at a time
    dtypes : dict
        mapping of column names to dtypes each chunk is cast to
    row_size : int
        estimated size of a single row in bytes, used to size chunks
"""
def __init__(self, paths, **kwargs):
if isinstance(paths, str):
paths = [paths]
if not isinstance(paths, list):
raise TypeError("paths must be a string or a list.")
if len(paths) < 1:
raise ValueError("len(paths) must be > 0.")
self.paths = paths
self.kwargs = kwargs
def __iter__(self):
for path in self.paths:
yield from GPUFileIterator(path, **self.kwargs)
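# Hedged illustrative sketch (not part of the original module): iterating over
# several CSV parts as GPU dataframe chunks. Paths and column names are
# hypothetical placeholders.
def _example_dataset_iteration():
    dataset = GPUDatasetIterator(
        ["/data/part_0.csv", "/data/part_1.csv"],
        engine="csv",
        gpu_memory_frac=0.1,
        names=["label", "a", "b"],
    )
    total_rows = 0
    for chunk in dataset:
        total_rows += len(chunk)  # each chunk is a cudf.DataFrame
    return total_rows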
class Shuffler:
"""
Shuffling the data is an important part of machine learning
training. This class is used by Workflow class and shuffles
the data after all the pre-processing and feature engineering
operators are finished their processing.
Parameters
-----------
out_dir : str
path for the shuffled files
num_out_files : int, default 30
num_threads : int, default 4
"""
def __init__(self, out_dir, num_out_files=30, num_threads=4):
self.queue = queue.Queue(num_threads)
self.write_locks = [threading.Lock() for _ in range(num_out_files)]
self.writer_files = [os.path.join(out_dir, f"{i}.parquet") for i in range(num_out_files)]
self.writers = [ParquetWriter(f, compression=None) for f in self.writer_files]
self.b_idxs = np.arange(num_out_files)
self.num_threads = num_threads
self.num_out_files = num_out_files
        # signifies end-of-data and that the thread should shut down
self._eod = object()
for _ in range(num_threads):
write_thread = threading.Thread(target=self._write_thread, daemon=True)
write_thread.start()
def _write_thread(self):
while True:
item = self.queue.get()
try:
if item is self._eod:
break
idx, data = item
with self.write_locks[idx]:
self.writers[idx].write_table(data)
finally:
self.queue.task_done()
@annotate("add_data", color="orange", domain="nvt_python")
def add_data(self, gdf):
arr = cp.arange(len(gdf))
cp.random.shuffle(arr)
# get slice info
int_slice_size = gdf.shape[0] // self.num_out_files
slice_size = int_slice_size if gdf.shape[0] % int_slice_size == 0 else int_slice_size + 1
np.random.shuffle(self.b_idxs)
for x in range(self.num_out_files):
start = x * slice_size
end = start + slice_size
# check if end is over length
end = end if end <= gdf.shape[0] else gdf.shape[0]
to_write = gdf.iloc[arr[start:end]]
b_idx = self.b_idxs[x]
self.queue.put((b_idx, to_write))
        # wait for all writes to finish before exiting (so that we aren't using memory)
self.queue.join()
def close(self):
# wake up all the worker threads and signal for them to exit
for _ in range(self.num_threads):
self.queue.put(self._eod)
# wait for pending writes to finish
self.queue.join()
for writer in self.writers:
writer.close()
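# Hedged illustrative sketch (not part of the original module): shuffling
# pre-processed chunks into a fixed number of parquet files. The output
# directory and the chunk source are hypothetical placeholders.
def _example_shuffle(chunks, out_dir="/tmp/shuffled"):
    shuffler = Shuffler(out_dir, num_out_files=8, num_threads=2)
    try:
        for gdf in chunks:  # each gdf is a cudf.DataFrame
            shuffler.add_data(gdf)
    finally:
        shuffler.close()  # flushes the queue and closes the parquet writers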
class HugeCTR:
"""
Generates outputs for HugeCTR
Parameters
-----------
out_dir : str
path for the shuffled files
num_out_files : int, default 30
num_threads : int, default 4
"""
def __init__(
self, out_dir, num_out_files=30, num_threads=4, cats=None, conts=None, labels=None
):
self.cats = cats
self.conts = conts
self.labels = labels
self.column_names = None
if labels and conts:
self.column_names = labels + conts
self.queue = queue.Queue(num_threads)
self.write_locks = [threading.Lock() for _ in range(num_out_files)]
self.writer_files = [os.path.join(out_dir, f"{i}.data") for i in range(num_out_files)]
file_list_writer = open(os.path.join(out_dir, "file_list.txt"), "w")
file_list_writer.write(str(num_out_files) + "\n")
for f in self.writer_files:
file_list_writer.write(f + "\n")
file_list_writer.close()
self.writers = [open(f, "ab") for f in self.writer_files]
self.num_threads = num_threads
self.num_out_files = num_out_files
self.num_samples = [0] * num_out_files
        # signifies end-of-data and that the thread should shut down
self._eod = object()
for _ in range(num_threads):
write_thread = threading.Thread(target=self._write_thread, daemon=True)
write_thread.start()
def _write_thread(self):
while True:
item = self.queue.get()
try:
if item is self._eod:
break
idx, data = item
ones = np.array(([1] * data.shape[0]), dtype=np.intc)
with self.write_locks[idx]:
df = data[self.column_names].to_pandas().astype(np.single)
for i in range(len(self.cats)):
df["___" + str(i) + "___" + self.cats[i]] = ones
df[self.cats[i]] = data[self.cats[i]].to_pandas().astype(np.longlong)
self.writers[idx].write(df.to_numpy().tobytes())
finally:
self.queue.task_done()
@annotate("add_data", color="orange", domain="nvt_python")
def add_data(self, gdf):
# get slice info
int_slice_size = gdf.shape[0] // self.num_out_files
slice_size = int_slice_size if gdf.shape[0] % int_slice_size == 0 else int_slice_size + 1
for x in range(self.num_out_files):
start = x * slice_size
end = start + slice_size
# check if end is over length
end = end if end <= gdf.shape[0] else gdf.shape[0]
to_write = gdf.iloc[start:end]
self.num_samples[x] = self.num_samples[x] + to_write.shape[0]
self.queue.put((x, to_write))
        # wait for all writes to finish before exiting (so that we aren't using memory)
self.queue.join()
def write_header(self):
for i in range(len(self.writers)):
self.writers[i].seek(0)
# error_check (0: no error check; 1: check_num)
# num of samples in this file
# Dimension of the labels
# Dimension of the features
# slot_num for each embedding
# reserved for future use
header = np.array(
[
0,
self.num_samples[i],
len(self.labels),
len(self.conts),
len(self.cats),
0,
0,
0,
],
dtype=np.longlong,
)
self.writers[i].write(header.tobytes())
def set_col_names(self, labels, cats, conts):
self.cats = cats
self.conts = conts
self.labels = labels
self.column_names = labels + conts
def close(self):
# wake up all the worker threads and signal for them to exit
for _ in range(self.num_threads):
self.queue.put(self._eod)
# wait for pending writes to finish
self.queue.join()
self.write_header()
for writer in self.writers:
writer.close()
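# Hedged illustrative sketch (not part of the original module): writing HugeCTR
# binary output for a dataset with known label/continuous/categorical columns.
# Column names and the chunk source are hypothetical placeholders.
def _example_hugectr_output(chunks, out_dir="/tmp/hugectr"):
    writer = HugeCTR(out_dir, num_out_files=4, num_threads=2,
                     cats=["c0", "c1"], conts=["i0"], labels=["label"])
    for gdf in chunks:
        writer.add_data(gdf)
    writer.close()  # also seeks back and writes the per-file headers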
|
build_incremental_dexmanifest.py
|
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Construct a dex manifest from a set of input .dex.zip files.
Usage: %s <output manifest> <input zip file>*
%s @<params file>
Input files must be either .zip files containing one or more .dex files or
.dex files.
A manifest file is written that contains one line for each input dex in the
following form:
<input zip> <path in input zip> <path in output zip> <SHA-256 checksum>
or
<input dex> - <path in output zip> <SHA-256 checksum>
"""
import hashlib
import os
# pylint: disable=g-import-not-at-top
try:
# python2 without compatibility package
from Queue import Queue
except ImportError:
# python3
from queue import Queue
import shutil
import sys
import tempfile
from threading import Thread
import zipfile
class DexmanifestBuilder(object):
"""Implementation of the dex manifest builder."""
def __init__(self):
self.manifest_lines = []
self.dir_counter = 1
self.output_dex_counter = 1
self.checksums = set()
self.tmpdir = None
self.queue = Queue()
self.threads_list = list()
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
shutil.rmtree(self.tmpdir, True)
def Checksum(self, filename, input_dex_or_zip, zippath):
"""Compute the SHA-256 checksum of a file.
This method could be invoked concurrently.
Therefore we need to include other metadata like input_dex_or_zip to
keep the context.
"""
h = hashlib.sha256()
with open(filename, "rb") as f:
while True:
data = f.read(65536)
if not data:
break
h.update(data)
return h.hexdigest(), input_dex_or_zip, zippath
def AddDexes(self, dex_metadata_list):
"""Adds all dex file together to the output.
Sort the result to make sure the dexes order are always the same given
the same input.
Args:
dex_metadata_list: A list of [fs_checksum, input_dex_or_zip, zippath],
where fs_checksum is the SHA-256 checksum for dex file, input_dex_or_zip
is the input file written to the manifest, zippath is the zip path
written to the manifest or None if the input file is not a .zip.
Returns:
None.
"""
dex_metadata_list_sorted = sorted(
dex_metadata_list, key=lambda x: (x[1], x[2]))
for dex_metadata in dex_metadata_list_sorted:
fs_checksum, input_dex_or_zip, zippath = dex_metadata[0], dex_metadata[
1], dex_metadata[2]
      if fs_checksum in self.checksums:
        # A dex with this checksum was already added; skip it and keep going.
        continue
self.checksums.add(fs_checksum)
zip_dex = "incremental_classes%d.dex" % self.output_dex_counter
self.output_dex_counter += 1
self.manifest_lines.append(
"%s %s %s %s" %
(input_dex_or_zip, zippath if zippath else "-", zip_dex, fs_checksum))
def ComputeChecksumConcurrently(self, input_dex_or_zip, zippath, dex):
"""Call Checksum concurrently to improve build performance when an app contains multiple dex files."""
t = Thread(target=lambda q, arg1, arg2, arg3: q.put(self.Checksum(arg1, arg2, arg3)), \
args=(self.queue, dex, input_dex_or_zip, zippath))
t.start()
self.threads_list.append(t)
def Run(self, argv):
"""Creates a dex manifest."""
if len(argv) < 1:
raise Exception("At least one argument expected")
if argv[0][0] == "@":
if len(argv) != 1:
raise IOError("A parameter file should be the only argument")
with open(argv[0][1:]) as param_file:
argv = [a.strip() for a in param_file.readlines()]
for input_filename in argv[1:]:
input_filename = input_filename.strip()
if input_filename.endswith(".zip"):
with zipfile.ZipFile(input_filename, "r") as input_dex_zip:
input_dex_dir = os.path.join(self.tmpdir, str(self.dir_counter))
os.makedirs(input_dex_dir)
self.dir_counter += 1
for input_dex_dex in input_dex_zip.namelist():
if not input_dex_dex.endswith(".dex"):
continue
input_dex_zip.extract(input_dex_dex, input_dex_dir)
fs_dex = input_dex_dir + "/" + input_dex_dex
self.ComputeChecksumConcurrently(input_filename, input_dex_dex,
fs_dex)
elif input_filename.endswith(".dex"):
self.ComputeChecksumConcurrently(input_filename, None, input_filename)
# Collect results from all threads
for t in self.threads_list:
t.join()
results = []
while not self.queue.empty():
fs_checksum, input_dex_or_zip, zippath = self.queue.get()
results.append([fs_checksum, input_dex_or_zip, zippath])
self.AddDexes(results)
with open(argv[0], "wb") as manifest:
manifest.write(("\n".join(self.manifest_lines)).encode("utf-8"))
def main(argv):
with DexmanifestBuilder() as b:
b.Run(argv[1:])
if __name__ == "__main__":
main(sys.argv)
|
maker.py
|
#!/usr/bin/env python
import requests, logging
from threading import Thread
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class Maker():
_maker_key = 'nmr2BnBoPJPDkNvfz3bk0'
def send(self, event, params=None):
        '''Trigger a Maker event, passing the event name and parameters as a JSON object'''
# https://maker.ifttt.com/trigger/{event}/with/key/nmr2BnBoPJPDkNvfz3bk0
r = requests.get("https://maker.ifttt.com/trigger/%s/with/key/%s" % (event, self._maker_key), data=params)
_logger.info(r.text)
def send_async(self, event, params=None):
Thread(target=self.send, args=(event, params,)).start()
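# Hedged illustrative sketch (not part of the original module): firing an IFTTT
# Maker event without blocking the caller. The event name and payload are
# hypothetical placeholders.
def _example_trigger():
    maker = Maker()
    maker.send_async('door_opened', params={'value1': 'front door'})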
|
train_hybrid1.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.loss import abs_loss
from models.model_builder import HybridSummarizer
from models.predictor import build_predictor
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
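# Hedged illustrative sketch (not part of the original script): how `str2bool`
# is typically wired into an argparse flag so that both '-use_interval true'
# and '-use_interval false' parse. The flag name is a hypothetical placeholder.
def _example_bool_flag(parser):
    parser.add_argument('-use_interval', type=str2bool, nargs='?',
                        const=True, default=True)
    return parser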
def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_abs_single(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_hybrid(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_hybrid(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_hybrid(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = HybridSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_hybrid(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = HybridSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
# tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = HybridSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
# tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, 'cpu',
shuffle=False, is_test=True)
trainer = build_trainer(args, '-1', None, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train_hybrid(args, device_id):
if (args.world_size > 1):
train_abs_multi(args)
else:
train_abs_single(args, device_id)
def train_abs_single(args, device_id):
init_logger(args.log_file)
logger.info(str(args))
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
if (args.load_from_extractive != ''):
logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
bert_from_extractive = bert_from_extractive['model']
else:
bert_from_extractive = None
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
if args.is_debugging:
# print("YES it is debugging")
return data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.batch_size, device,
shuffle=False, is_test=False)
else:
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = HybridSummarizer(args, device, checkpoint, bert_from_extractive)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
logger.info(model)
# tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
trainer.train(train_iter_fct, args.train_steps)
|
github.py
|
import copy
import json
import re
import threading
import time
from urllib.request import urlopen
from i3pystatus import IntervalModule, formatp
from i3pystatus.core import ConfigError
from i3pystatus.core.desktop import DesktopNotification
from i3pystatus.core.util import user_open, internet, require
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
API_METHODS_URL = 'https://www.githubstatus.com/api/v2/summary.json'
STATUS_URL = 'https://www.githubstatus.com'
NOTIFICATIONS_URL = 'https://github.com/notifications'
AUTH_URL = 'https://api.github.com/notifications'
class Github(IntervalModule):
'''
This module checks the GitHub system status, and optionally the number of
unread notifications.
.. versionchanged:: 3.36
Module now checks system status in addition to unread notifications.
.. note::
For notification checking, the following is required:
- The requests_ module must be installed.
- Either ``access_token`` (recommended) or ``username`` and
``password`` must be used to authenticate to GitHub.
Using an access token is the recommended authentication method. Click
here__ to generate a new access token. Fill in the **Token
description** box, and enable the **notifications** scope by checking
the appropriate checkbox. Then, click the **Generate token** button.
.. important::
An access token is the only supported means of authentication for
this module, if `2-factor authentication`_ is enabled.
.. _requests: https://pypi.python.org/pypi/requests
.. __: https://github.com/settings/tokens/new
.. _`2-factor authentication`: https://help.github.com/articles/about-two-factor-authentication/
See here__ for more information on GitHub's authentication API.
.. __: https://developer.github.com/v3/#authentication
If you would rather use a username and password pair, you can either
pass them as arguments when registering the module, or use i3pystatus'
:ref:`credential management <credentials>` support to store them in a
keyring. Keep in mind that if you do not pass a ``username`` or
``password`` parameter when registering the module, i3pystatus will
still attempt to retrieve these values from a keyring if the keyring_
Python module is installed. This could result in i3pystatus aborting
during startup if it cannot find a usable keyring backend. If you do
not plan to use credential management at all in i3pystatus, then you
should either ensure that A) keyring_ is not installed, or B) both
keyring_ and keyrings.alt_ are installed, to avoid this error.
.. _keyring: https://pypi.python.org/pypi/keyring
.. _keyrings.alt: https://pypi.python.org/pypi/keyrings.alt
.. rubric:: Available formatters
* `{status}` — Current GitHub status. This formatter can be different
depending on the current outage status (``none``, ``minor``, ``major``,
or ``critical``). The content displayed for each of these statuses is
defined in the **status** config option.
* `{unread}` — When there are unread notifications, this formatter will
      contain the value of the **unread_marker** config option. When there
      are no unread notifications, this formatter will be an empty string.
    * `{unread_count}` — The number of unread notifications. If there are no
      unread notifications, it will be an empty string.
* `{update_error}` — When an error is encountered updating this module,
this formatter will be set to the value of the **update_error**
config option.
.. rubric:: Click events
This module responds to 4 different click events:
- **Left-click** — Forces an update of the module.
- **Right-click** — Triggers a desktop notification showing the most recent
update to the GitHub status. This is useful when the status changes when
you are away from your computer, so that the updated status can be seen
without visiting the `GitHub Status Dashboard`_. This click event
requires **notify_status** to be set to ``True``.
- **Double left-click** — Opens the GitHub `notifications page`_ in your web
browser.
- **Double right-click** — Opens the `GitHub Status Dashboard`_ in your web
browser.
.. rubric:: Desktop notifications
.. versionadded:: 3.36
If **notify_status** is set to ``True``, a notification will be displayed
when the status reported by the `GitHub Status API`_ changes.
If **notify_unread** is set to ``True``, a notification will be displayed
when new unread notifications are found. Double-clicking the module will
launch the GitHub notifications dashboard in your browser.
.. note::
A notification will be displayed if there was a problem querying the
`GitHub Status API`_, irrespective of whether or not **notify_status**
or **notify_unread** is set to ``True``.
.. rubric:: Example configuration
The below example enables desktop notifications, enables Pango hinting for
differently-colored **update_error** and **refresh_icon** text, and alters
    both the status text and the colors used to visually denote the current
status level. It also sets the log level to debug, for troubleshooting
purposes.
.. code-block:: python
status.register(
'github',
log_level=logging.DEBUG,
notify_status=True,
notify_unread=True,
access_token='0123456789abcdef0123456789abcdef01234567',
hints={'markup': 'pango'},
update_error='<span color="#af0000">!</span>',
refresh_icon='<span color="#ff5f00">⟳</span>',
status={
'none': '✓',
'minor': '!',
'major': '!!',
'critical': '!!!',
},
colors={
'none': '#008700',
'minor': '#d7ff00',
'major': '#af0000',
'critical': '#640000',
},
)
.. note::
Setting debug logging and authenticating with an access token will
include the access token in the log file, as the notification URL is
logged at this level.
.. _`GitHub Status API`: https://www.githubstatus.com/api
.. _`GitHub Status Dashboard`: https://www.githubstatus.com/
.. _`notifications page`: https://github.com/notifications
.. rubric:: Extended string formatting
.. versionadded:: 3.36
This module supports the :ref:`formatp <formatp>` extended string format
syntax. This allows for values to be hidden when they evaluate as False.
The default ``format`` string value for this module makes use of this
syntax to conditionally show the value of the ``update_error`` config value
when the backend encounters an error during an update, but this can also
be used to only show the number of unread notifications when that number is
not **0**. The below example would show the unread count as **(3)** when
there are 3 unread notifications, but would show nothing when there are no
unread notifications.
.. code-block:: python
status.register(
'github',
notify_status=True,
notify_unread=True,
access_token='0123456789abcdef0123456789abcdef01234567',
format='{status}[ ({unread_count})][ {update_error}]'
)
'''
settings = (
('format', 'format string'),
('status', 'Dictionary mapping statuses to the text which represents '
'that status type. This defaults to ``GitHub`` for all '
'status types.'),
('colors', 'Dictionary mapping statuses to the color used to display '
'the status text'),
('refresh_icon', 'Text to display (in addition to any text currently '
'shown by the module) when refreshing the GitHub '
'status. **NOTE:** Depending on how quickly the '
'update is performed, the icon may not be displayed.'),
('update_error', 'Value for the ``{update_error}`` formatter when an '
'error is encountered while checking GitHub status'),
('keyring_backend', 'alternative keyring backend for retrieving '
'credentials'),
('username', ''),
('password', ''),
('access_token', ''),
('unread_marker', 'Defines the string that the ``{unread}`` formatter '
'shows when there are pending notifications'),
('notify_status', 'Set to ``True`` to display a desktop notification '
'on status changes'),
('notify_unread', 'Set to ``True`` to display a desktop notification '
'when new notifications are detected'),
('unread_notification_template',
'String with no more than one ``%d``, which will be replaced by '
'the number of new unread notifications. Useful for those with '
'non-English locales who would like the notification to be in '
'their native language. The ``%d`` can be omitted if desired.'),
('api_methods_url', 'URL from which to retrieve the API endpoint URL '
'which this module will use to check the GitHub '
'Status'),
('status_url', 'The URL to the status page (opened when the module is '
                       'double-clicked with the right mouse button)'),
('notifications_url', 'The URL to the GitHub notifications page '
'(opened when the module is double-clicked with '
                              'the left mouse button)'),
)
# Defaults for module configurables
_default_colors = {
'none': '#28a745',
'maintenance': '#4f8cc9',
'minor': '#dbab09',
'major': '#e36209',
'critical': '#dc3545',
}
# Module configurables
format = '{status}[ {unread}][ {update_error}]'
status = {}
colors = _default_colors
refresh_icon = '⟳'
update_error = '!'
username = ''
password = ''
access_token = ''
unread_marker = '•'
notify_status = False
notify_unread = False
unread_notification_template = 'You have %d new notification(s)'
api_methods_url = API_METHODS_URL
status_url = STATUS_URL
notifications_url = NOTIFICATIONS_URL
# Global configurables
interval = 600
max_error_len = 50
keyring_backend = None
# Other
unread = ''
unknown_color = None
unknown_status = '?'
failed_update = False
__previous_json = None
__current_json = None
new_unread = None
previous_unread = None
current_unread = None
config_error = None
data = {'status': '',
'unread': 0,
'unread_count': '',
'update_error': ''}
output = {'full_text': '', 'color': None}
# Click events
on_leftclick = ['perform_update']
on_rightclick = ['show_status_notification']
on_doubleleftclick = ['launch_notifications_url']
on_doublerightclick = ['launch_status_url']
@require(internet)
def launch_status_url(self):
self.logger.debug('Launching %s in browser', self.status_url)
user_open(self.status_url)
@require(internet)
def launch_notifications_url(self):
self.logger.debug('Launching %s in browser', self.notifications_url)
user_open(self.notifications_url)
def init(self):
if self.colors != self._default_colors:
new_colors = copy.copy(self._default_colors)
new_colors.update(self.colors)
self.colors = new_colors
self.logger.debug('colors = %s', self.colors)
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_loop, daemon=True)
self.thread.start()
def update_loop(self):
try:
self.perform_update()
while True:
with self.condition:
self.condition.wait(self.interval)
self.perform_update()
except Exception:
msg = 'Exception in {thread} at {time}, module {name}'.format(
thread=threading.current_thread().name,
time=time.strftime('%c'),
name=self.__class__.__name__,
)
self.logger.error(msg, exc_info=True)
@require(internet)
def status_api_request(self, url):
self.logger.debug('Making GitHub Status API request to %s', url)
try:
with urlopen(url) as content:
try:
content_type = dict(content.getheaders())['Content-Type']
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
response_json = content.read().decode(charset).strip()
if not response_json:
self.logger.error('JSON response from %s was blank', url)
return {}
try:
response = json.loads(response_json)
except json.decoder.JSONDecodeError as exc:
self.logger.error('Error loading JSON: %s', exc)
self.logger.debug('JSON text that failed to load: %s',
response_json)
return {}
self.logger.log(5, 'API response: %s', response)
return response
except Exception as exc:
self.logger.error(
'Failed to make API request to %s. Exception follows:', url,
exc_info=True
)
return {}
def detect_status_change(self, response=None):
if response is not None:
# Compare last update to current and exit without displaying a
# notification if one is not needed.
if self.__previous_json is None:
# This is the first time status has been updated since
# i3pystatus was started. Set self.__previous_json and exit.
self.__previous_json = response
return
if response.get('status', {}).get('description') == self.__previous_json.get('status', {}).get('description'):
# No change, so no notification
return
self.__previous_json = response
if self.__previous_json is None:
# The only way this would happen is if we invoked the right-click
# event before we completed the initial status check.
return
self.show_status_notification()
@staticmethod
def notify(message):
return DesktopNotification(title='GitHub', body=message).display()
def skip_notify(self, message):
self.logger.debug(
'Desktop notifications turned off. Skipped notification: %s',
message
)
return False
def show_status_notification(self):
message = self.current_status_description
self.skip_notify(message) \
if not self.notify_status or (self.previous_status is None and self.current_status == 'none') \
else self.notify(message)
def show_unread_notification(self):
if '%d' not in self.unread_notification_template:
formatted = self.unread_notification_template
else:
try:
new_unread = len(self.new_unread)
except TypeError:
new_unread = 0
try:
formatted = self.unread_notification_template % new_unread
except TypeError as exc:
self.logger.error(
'Failed to format {0!r}: {1}'.format(
self.unread_notification_template,
exc
)
)
return False
return self.skip_notify(formatted) \
if not self.notify_unread \
else self.notify(formatted)
@require(internet)
def perform_update(self):
self.output['full_text'] = \
self.refresh_icon + self.output.get('full_text', '')
self.failed_update = False
self.update_status()
try:
self.config_error = None
self.update_unread()
except ConfigError as exc:
self.config_error = exc
self.data['update_error'] = self.update_error \
if self.failed_update \
else ''
self.refresh_display()
@property
def current_incidents(self):
try:
return self.__current_json['incidents']
except (KeyError, TypeError):
return []
@property
def previous_incidents(self):
try:
return self.__previous_json['incidents']
except (KeyError, TypeError):
return []
@property
def current_status(self):
try:
return self.__current_json['status']['indicator']
except (KeyError, TypeError):
return None
@property
def previous_status(self):
try:
return self.__previous_json['status']['indicator']
except (KeyError, TypeError):
return None
@property
def current_status_description(self):
try:
return self.__current_json['status']['description']
except (KeyError, TypeError):
return None
@require(internet)
def update_status(self):
try:
# Get most recent update
self.__current_json = self.status_api_request(self.api_methods_url)
if not self.__current_json:
self.failed_update = True
return
self.logger.debug('Current GitHub Status: %s', self.current_status)
self.data['status'] = self.status.get(self.current_status, 'GitHub')
if self.current_incidents != self.previous_incidents:
self.show_status_notification()
self.__previous_json = self.__current_json
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking GitHub status. '
'Exception follows:', exc_info=True
)
self.failed_update = True
@require(internet)
def update_unread(self):
# Reset the new_unread attribute to prevent spurious notifications
self.new_unread = None
try:
if not self.username and not self.password and not self.access_token:
# Auth not configured
self.logger.debug(
'No auth configured, notifications will not be checked')
return True
if not HAS_REQUESTS:
self.logger.error(
'The requests module is required to check GitHub notifications')
self.failed_update = True
return False
self.logger.debug(
'Checking unread notifications using %s',
'access token' if self.access_token else 'username/password'
)
if self.access_token:
request_kwargs = {
'headers': {
'Authorization': 'token {}'.format(self.access_token),
},
}
else:
request_kwargs = {
'auth': (self.username, self.password),
}
self.current_unread = set()
page_num = 0
old_unread_url = None
unread_url = AUTH_URL
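# Walk the paginated notifications endpoint: keep requesting pages until the
# 'next' link in the Link header stops producing a new URL.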
while old_unread_url != unread_url:
old_unread_url = unread_url
page_num += 1
self.logger.debug(
'Reading page %d of notifications (%s)',
page_num, unread_url
)
try:
response = requests.get(unread_url, **request_kwargs)
self.logger.log(
5,
'Raw return from GitHub notification check: %s',
response.text)
unread_data = json.loads(response.text)
except (requests.ConnectionError, requests.Timeout) as exc:
self.logger.error(
'Failed to check unread notifications: %s', exc)
self.failed_update = True
return False
except json.decoder.JSONDecodeError as exc:
self.logger.error('Error loading JSON: %s', exc)
self.logger.debug(
'JSON text that failed to load: %s', response.text)
self.failed_update = True
return False
# Bad credentials or some other error
if isinstance(unread_data, dict):
raise ConfigError(
unread_data.get(
'message',
'Unknown error encountered retrieving unread notifications'
)
)
# Update the current count of unread notifications
self.current_unread.update(
[x['id'] for x in unread_data if 'id' in x]
)
# Check 'Link' header for next page of notifications
# (https://tools.ietf.org/html/rfc5988#section-5)
self.logger.debug('Checking for next page of notifications')
try:
link_header = response.headers['Link']
except AttributeError:
self.logger.error(
'No headers present in response. This might be due to '
'an API change in the requests module.'
)
self.failed_update = True
continue
except KeyError:
self.logger.debug('Only one page of notifications present')
continue
else:
# Process 'Link' header
try:
links = requests.utils.parse_header_links(link_header)
except Exception as exc:
self.logger.error(
'Failed to parse \'Link\' header: %s', exc
)
self.failed_update = True
continue
for link in links:
try:
link_rel = link['rel']
if link_rel != 'next':
# Link does not refer to the next page, skip it
continue
# Set the unread_url so that when we reach the top
# of the outer loop, we have a new URL to check.
unread_url = link['url']
break
except TypeError:
# Malformed hypermedia link
self.logger.warning(
'Malformed hypermedia link (%s) in \'Link\' '
'header (%s)', link, links
)
continue
else:
self.logger.debug('No more pages of notifications remain')
if self.failed_update:
return False
self.data['unread_count'] = len(self.current_unread)
self.data['unread'] = self.unread_marker \
if self.data['unread_count'] > 0 \
else ''
if self.previous_unread is not None:
if not self.current_unread.issubset(self.previous_unread):
self.new_unread = self.current_unread - self.previous_unread
if self.new_unread:
self.show_unread_notification()
self.previous_unread = self.current_unread
return True
except ConfigError as exc:
# This will be caught by the calling function
raise exc
except Exception as exc:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking GitHub notifications. '
'Exception follows:', exc_info=True
)
self.failed_update = True
return False
def refresh_display(self):
previous_color = self.output.get('color')
try:
color = self.colors.get(
self.current_status,
self.unknown_color)
except TypeError:
# Shouldn't get here, but this would happen if this function is
# called before we check the current status for the first time.
color = previous_color
self.output = {'full_text': formatp(self.format, **self.data).strip(),
'color': color}
def run(self):
if self.config_error is not None:
raise self.config_error
|
health_manager.py
|
from template_finder import TemplateFinder
from ui_manager import UiManager
from belt_manager import BeltManager
from pather import Location
import cv2
import time
import keyboard
from utils.custom_mouse import mouse
from utils.misc import cut_roi, color_filter, wait
from logger import Logger
from screen import Screen
import numpy as np
from config import Config
class HealthManager:
def __init__(self, screen: Screen):
self._config = Config()
self._screen = screen
self._template_finder = TemplateFinder(screen)
self._ui_manager = UiManager(screen, self._template_finder)
self._belt_manager = BeltManager(screen, self._template_finder)
self._do_monitor = False
self._did_chicken = False
self._last_rejuv = time.time()
self._last_health = time.time()
self._last_mana = time.time()
self._last_merc_health = time.time()
self._callback = None
self._pausing = True
def stop_monitor(self):
self._do_monitor = False
def set_callback(self, callback):
self._callback = callback
def did_chicken(self):
return self._did_chicken
def reset_chicken_flag(self):
self._did_chicken = False
self._pausing = True
def update_location(self, loc: Location):
if loc is not None:
bosses = ["shenk", "eldritch", "pindle"]
self._pausing = not any(substring in loc for substring in bosses)
@staticmethod
def get_health(config: Config, img: np.ndarray) -> float:
health_rec = [config.ui_pos["health_left"], config.ui_pos["health_top"], config.ui_pos["health_width"], config.ui_pos["health_height"]]
health_img = cut_roi(img, health_rec)
# red mask
mask1, _ = color_filter(health_img, [np.array([0, 110, 20]), np.array([2, 255, 255])])
mask2, _ = color_filter(health_img, [np.array([178, 110, 20]), np.array([180, 255, 255])])
mask = cv2.bitwise_or(mask1, mask2)
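# The mask is binary (0 or 255), so sum / size / 255 is the fraction of the
# health-globe ROI matching the red filter, i.e. an approximate HP percentage.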
health_percentage = (float(np.sum(mask)) / mask.size) * (1/255.0)
# green (in case of poison)
mask, _ = color_filter(health_img, [np.array([47, 90, 20]), np.array([54, 255, 255])])
health_percentage_green = (float(np.sum(mask)) / mask.size) * (1/255.0)
return max(health_percentage, health_percentage_green)
@staticmethod
def get_mana(config: Config, img: np.ndarray) -> float:
mana_rec = [config.ui_pos["mana_left"], config.ui_pos["mana_top"], config.ui_pos["mana_width"], config.ui_pos["mana_height"]]
mana_img = cut_roi(img, mana_rec)
mask, _ = color_filter(mana_img, [np.array([117, 120, 20]), np.array([121, 255, 255])])
mana_percentage = (float(np.sum(mask)) / mask.size) * (1/255.0)
return mana_percentage
@staticmethod
def get_merc_health(config: Config, img: np.ndarray) -> float:
health_rec = [config.ui_pos["merc_health_left"], config.ui_pos["merc_health_top"], config.ui_pos["merc_health_width"], config.ui_pos["merc_health_height"]]
merc_health_img = cut_roi(img, health_rec)
merc_health_img = cv2.cvtColor(merc_health_img, cv2.COLOR_BGR2GRAY)
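# Threshold the grayscale bar: any pixel brighter than the near-black background
# counts as "filled", so the ratio below approximates the merc's remaining health.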
_, health_thresh = cv2.threshold(merc_health_img, 5, 255, cv2.THRESH_BINARY)
merc_health_percentage = (float(np.sum(health_thresh)) / health_thresh.size) * (1/255.0)
return merc_health_percentage
def _do_chicken(self, img):
if self._callback is not None:
self._callback()
self._callback = None
if self._config.general["info_screenshots"]:
cv2.imwrite("./info_screenshots/info_debug_chicken_" + time.strftime("%Y%m%d_%H%M%S") + ".png", img)
# clean up key presses that might be pressed in the run_thread
keyboard.release(self._config.char["stand_still"])
wait(0.02, 0.05)
keyboard.release(self._config.char["show_items"])
wait(0.02, 0.05)
mouse.release(button="left")
wait(0.02, 0.05)
mouse.release(button="right")
time.sleep(0.01)
wait(1.0, 2.0)
self._ui_manager.save_and_exit(does_chicken=True)
self._did_chicken = True
self._pausing = True
def start_monitor(self):
Logger.info("Start health monitoring")
self._do_monitor = True
self._did_chicken = False
start = time.time()
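# Poll roughly every 100 ms: grab a screenshot, confirm we are in-game, then
# drink potions or chicken (save & exit) based on the configured thresholds.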
while self._do_monitor:
time.sleep(0.1)
# Wait until the flag is reset by run.py
if self._did_chicken or self._pausing: continue
img = self._screen.grab()
# TODO: Check if in town or not! Otherwise risk endless chicken loop
ingame_template_match = self._template_finder.search("WINDOW_INGAME_OFFSET_REFERENCE", img, roi=self._config.ui_roi["window_ingame_ref"], threshold=0.9)
if ingame_template_match.valid:
health_percentage = self.get_health(self._config, img)
mana_percentage = self.get_mana(self._config, img)
# check rejuv
success_drink_rejuv = False
last_drink = time.time() - self._last_rejuv
if (health_percentage < self._config.char["take_rejuv_potion_health"] and last_drink > 1) or \
(mana_percentage < self._config.char["take_rejuv_potion_mana"] and last_drink > 2):
success_drink_rejuv = self._belt_manager.drink_potion("rejuv", stats=[health_percentage, mana_percentage])
self._last_rejuv = time.time()
# in case no rejuv was used, check for chicken, health pot and mana pot usage
if not success_drink_rejuv:
# check health
last_drink = time.time() - self._last_health
if health_percentage < self._config.char["take_health_potion"] and last_drink > 3.5:
self._belt_manager.drink_potion("health", stats=[health_percentage, mana_percentage])
self._last_health = time.time()
# give the chicken a 6 sec delay to give time for a healing pot and avoid endless loop of chicken
elif health_percentage < self._config.char["chicken"] and (time.time() - start) > 6:
Logger.warning(f"Trying to chicken, player HP {(health_percentage*100):.1f}%!")
self._do_chicken(img)
# check mana
last_drink = time.time() - self._last_mana
if mana_percentage < self._config.char["take_mana_potion"] and last_drink > 4:
self._belt_manager.drink_potion("mana", stats=[health_percentage, mana_percentage])
self._last_mana = time.time()
# check merc
merc_alive = self._template_finder.search("MERC", img, roi=self._config.ui_roi["merc_icon"]).valid
if merc_alive:
merc_health_percentage = self.get_merc_health(self._config, img)
last_drink = time.time() - self._last_merc_health
if merc_health_percentage < self._config.char["merc_chicken"]:
Logger.warning(f"Trying to chicken, merc HP {(merc_health_percentage*100):.1f}%!")
self._do_chicken(img)
if merc_health_percentage < self._config.char["heal_rejuv_merc"] and last_drink > 4.0:
self._belt_manager.drink_potion("rejuv", merc=True, stats=[merc_health_percentage])
self._last_merc_health = time.time()
elif merc_health_percentage < self._config.char["heal_merc"] and last_drink > 7.0:
self._belt_manager.drink_potion("health", merc=True, stats=[merc_health_percentage])
self._last_merc_health = time.time()
Logger.debug("Stop health monitoring")
# Testing: Start dying or losing mana and see if it works
if __name__ == "__main__":
import threading
config = Config()
screen = Screen(config.general["monitor"])
manager = HealthManager(screen)
health_monitor_thread = threading.Thread(target=manager.start_monitor)
health_monitor_thread.daemon = True
health_monitor_thread.start()
manager.set_callback(lambda: print("Hallo CB"))
health_monitor_thread.join()
|
select_ticket_info.py
|
# -*- coding=utf-8 -*-
import datetime
import random
import os
import socket
import sys
import threading
import time
import TickerConfig
import wrapcache
from agency.cdn_utils import CDNProxy
from config import urlConf, configCommon
from config.TicketEnmu import ticket
from config.configCommon import seat_conf_2, seat_conf
from config.getCookie import getDrvicesID
from init.login import GoLogin
from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest
from inter.ChechFace import chechFace
from inter.CheckUser import checkUser
from inter.GetPassengerDTOs import getPassengerDTOs
from inter.LiftTicketInit import liftTicketInit
from inter.Query import query
from inter.SubmitOrderRequest import submitOrderRequest
from myException.PassengerUserException import PassengerUserException
from myException.UserPasswordException import UserPasswordException
from myException.ticketConfigException import ticketConfigException
from myException.ticketIsExitsException import ticketIsExitsException
from myException.ticketNumOutException import ticketNumOutException
from myUrllib.httpUtils import HTTPClient
class select:
"""
Fast ticket submission channel
"""
def __init__(self):
self.get_ticket_info()
self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE]
self.auto_code_type = 2
self.httpClint = HTTPClient(TickerConfig.IS_PROXY)
self.urls = urlConf.urls
self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type)
self.cdn_list = []
self.cookies = ""
self.queryUrl = "leftTicket/queryA"
self.passengerTicketStrList = ""
self.passengerTicketStrByAfterLate = ""
self.oldPassengerStr = ""
self.set_type = ""
self.flag = True
@staticmethod
def get_ticket_info():
"""
Print the current configuration
:return:
"""
print(u"*" * 50)
print(f"检查当前版本为: {TickerConfig.RE_VERSION}")
print(u"检查当前python版本为:{},目前版本只支持3.6以上".format(sys.version.split(" ")[0]))
print(u"12306刷票小助手,最后更新于2019.09.09,请勿作为商业用途,交流群号:"
u" 1群:286271084(已满)\n"
u" 2群:649992274(已满)\n"
u" 3群:632501142(已满)\n"
u" 4群: 606340519(已满)\n"
u" 5群: 948526733(已满)\n"
u" 6群: 608792930(未满)\n"
u" 7群: 660689659(已满)\n"
u" 8群: 620629239(未满)\n"
)
print(
f"当前配置:\n出发站:{TickerConfig.FROM_STATION}\n到达站:{TickerConfig.TO_STATION}\n乘车日期:{','.join(TickerConfig.STATION_DATES)}\n坐席:{','.join(TickerConfig.SET_TYPE)}\n是否有票优先提交:{TickerConfig.IS_MORE_TICKET}\n乘车人:{TickerConfig.TICKET_PEOPLES}\n" \
f"刷新间隔: 随机(1-3S)\n僵尸票关小黑屋时长: {TickerConfig.TICKET_BLACK_LIST_TIME}\n下单接口: {TickerConfig.ORDER_TYPE}\n下单模式: {TickerConfig.ORDER_MODEL}\n预售踩点时间:{TickerConfig.OPEN_TIME}")
print(u"*" * 50)
def station_table(self, from_station, to_station):
"""
Look up the station telecodes for the given station names
:param from_station: departure station name
:param to_station: arrival station name
:return: (from_station_code, to_station_code)
"""
path = os.path.join(os.path.dirname(__file__), '../station_name.txt')
try:
with open(path, encoding="utf-8") as result:
info = result.read().split('=')[1].strip("'").split('@')
except Exception:
with open(path) as result:
info = result.read().split('=')[1].strip("'").split('@')
del info[0]
station_name = {}
for i in range(0, len(info)):
n_info = info[i].split('|')
station_name[n_info[1]] = n_info[2]
try:
from_station = station_name[from_station.encode("utf8")]
to_station = station_name[to_station.encode("utf8")]
except KeyError:
from_station = station_name[from_station]
to_station = station_name[to_station]
return from_station, to_station
def call_login(self, auth=False):
"""
Login callback method
:return:
"""
if auth:
return self.login.auth()
else:
configCommon.checkSleepTime(self) # 防止网上启动晚上到点休眠
self.login.go_login()
def cdn_req(self, cdn):
for i in range(len(cdn) - 1):
http = HTTPClient(0)
http.set_cookies(self.cookies)
urls = self.urls["loginInitCdn"]
http._cdn = cdn[i].replace("\n", "")
start_time = datetime.datetime.now()
rep = http.send(urls)
if rep and "message" not in rep and (datetime.datetime.now() - start_time).microseconds / 1000 < 500:
if cdn[i].replace("\n", "") not in self.cdn_list: # 如果有重复的cdn,则放弃加入
# print(u"加入cdn {0}".format(cdn[i].replace("\n", "")))
self.cdn_list.append(cdn[i].replace("\n", ""))
print(u"所有cdn解析完成...")
def cdn_certification(self):
"""
CDN verification
:return:
"""
if TickerConfig.IS_CDN == 1:
CDN = CDNProxy()
all_cdn = CDN.open_cdn_file()
if all_cdn:
# print(u"由于12306网站策略调整,cdn功能暂时关闭。")
print(u"开启cdn查询")
print(u"本次待筛选cdn总数为{}, 筛选时间大约为5-10min".format(len(all_cdn)))
t = threading.Thread(target=self.cdn_req, args=(all_cdn,))
t.setDaemon(True)
# t2 = threading.Thread(target=self.set_cdn, args=())
t.start()
# t2.start()
else:
raise ticketConfigException(u"cdn列表为空,请先加载cdn")
def main(self):
l = liftTicketInit(self)
l.reqLiftTicketInit()
getDrvicesID(self)
self.call_login()
self.cdn_certification()
check_user = checkUser(self)
t = threading.Thread(target=check_user.sendCheckUser)
t.setDaemon(True)
t.start()
from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION)
num = 0
s = getPassengerDTOs(session=self, ticket_peoples=TickerConfig.TICKET_PEOPLES)
passenger = s.sendGetPassengerDTOs()
wrapcache.set("user_info", passenger, timeout=9999999)
while 1:
try:
num += 1
now = datetime.datetime.now() # 感谢群里大佬提供整点代码
configCommon.checkSleepTime(self) # 晚上到点休眠
if TickerConfig.ORDER_MODEL == 1:
sleep_time_s = 0.5
sleep_time_t = 0.6
# Tested: there is microsecond-level jitter which should not matter (sample result: 2019-01-02 22:30:00.004555). The presale start is still affected by the timing of the previous refresh; no better workaround found yet.
while not now.strftime("%H:%M:%S") == TickerConfig.OPEN_TIME:
now = datetime.datetime.now()
if now.strftime("%H:%M:%S") > TickerConfig.OPEN_TIME:
break
time.sleep(0.0001)
else:
sleep_time_s = 0.5
sleep_time_t = 3
q = query(session=self,
from_station=from_station,
to_station=to_station,
from_station_h=TickerConfig.FROM_STATION,
to_station_h=TickerConfig.TO_STATION,
_station_seat=self._station_seat,
station_trains=TickerConfig.STATION_TRAINS,
station_dates=TickerConfig.STATION_DATES,
ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES),
)
queryResult = q.sendQuery()
# Query API response
if queryResult.get("status", False):
train_no = queryResult.get("train_no", "")
train_date = queryResult.get("train_date", "")
stationTrainCode = queryResult.get("stationTrainCode", "")
secretStr = queryResult.get("secretStr", "")
secretList = queryResult.get("secretList", "")
seat = queryResult.get("seat", "")
leftTicket = queryResult.get("leftTicket", "")
query_from_station_name = queryResult.get("query_from_station_name", "")
query_to_station_name = queryResult.get("query_to_station_name", "")
is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES))
if wrapcache.get(train_no):
print(ticket.QUEUE_WARNING_MSG.format(train_no))
else:
# Fetch passenger (contact) info
s = getPassengerDTOs(session=self, ticket_peoples=TickerConfig.TICKET_PEOPLES,
set_type="" if isinstance(seat, list) else seat_conf_2[seat],
# waitlist (candidate) orders need multiple seat classes
is_more_ticket_num=is_more_ticket_num)
getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr(secretStr, secretList)
if getPassengerDTOsResult.get("status", False):
self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "")
self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get(
"passengerTicketStrByAfterLate", "")
self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "")
self.set_type = getPassengerDTOsResult.get("set_type", "")
# Submit the order
# Orders come in two kinds: a normal (grab) order and a waitlist (candidate) order
if secretStr: # 正常下单
if TickerConfig.ORDER_TYPE == 1: # 快速下单
a = autoSubmitOrderRequest(session=self,
secretStr=secretStr,
train_date=train_date,
passengerTicketStr=self.passengerTicketStrList,
oldPassengerStr=self.oldPassengerStr,
train_no=train_no,
stationTrainCode=stationTrainCode,
leftTicket=leftTicket,
set_type=self.set_type,
query_from_station_name=query_from_station_name,
query_to_station_name=query_to_station_name,
)
a.sendAutoSubmitOrderRequest()
elif TickerConfig.ORDER_TYPE == 2: # 普通下单
sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no,
self.set_type,
self.passengerTicketStrList, self.oldPassengerStr, train_date,
TickerConfig.TICKET_PEOPLES)
sor.sendSubmitOrderRequest()
elif secretList: # 候补订单
c = chechFace(self, secretList, train_no)
c.sendChechFace()
else:
random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2)
nateMsg = ' 无候补机会' if TickerConfig.ORDER_TYPE == 2 else ""
print(f"正在第{num}次查询 随机停留时长:{random_time} 乘车日期: {','.join(TickerConfig.STATION_DATES)} 车次:{'.'.join(TickerConfig.STATION_TRAINS)} 下单无票{nateMsg} 耗时:{(datetime.datetime.now() - now).microseconds / 1000}ms")
time.sleep(random_time)
except PassengerUserException as e:
print(e)
break
except ticketConfigException as e:
print(e)
break
except ticketIsExitsException as e:
print(e)
break
except ticketNumOutException as e:
print(e)
break
except UserPasswordException as e:
print(e)
break
except ValueError as e:
if e == "No JSON object could be decoded":
print(u"12306接口无响应,正在重试")
else:
print(e)
except KeyError as e:
print(e)
except TypeError as e:
print(u"12306接口无响应,正在重试 {0}".format(e))
except socket.error as e:
print(e)
if __name__ == '__main__':
s = select()
cdn = s.station_table("长沙", "深圳")
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "MonetaryUnit"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
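# Default nSequence 0xFFFFFFFE keeps nLockTime enforceable
# (0xFFFFFFFF on every input would disable the locktime).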
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
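# Outputs paying back to our own change branch are described to the device by
# derivation path so it can verify them silently; all other outputs are sent as
# a plain address or as OP_RETURN data.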
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
conftest.py
|
# Copyright (c) 2019 SUSE LINUX GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
import threading
from tests.config import settings, converter
from tests.lib import common
from tests.lib.workspace import Workspace
if settings.HARDWARE_PROVIDER.upper() == 'OPENSTACK':
from tests.lib.hardware.openstack_sdk import Hardware as Hardware # type: ignore # noqa: E501
elif settings.HARDWARE_PROVIDER.upper() == 'LIBVIRT':
from tests.lib.hardware.libvirt import Hardware as Hardware # type: ignore
elif settings.HARDWARE_PROVIDER.upper() == 'AWS_EC2':
from tests.lib.hardware.aws_ec2 import Hardware as Hardware # type: ignore
else:
raise Exception("Hardware provider '{}' not yet supported by "
"rookcheck".format(settings.HARDWARE_PROVIDER))
if settings.DISTRO == 'SLES_CaaSP':
from tests.lib.kubernetes.caasp import CaaSP as Kubernetes
from tests.lib.rook.ses import RookSes as RookCluster
else:
from tests.lib.kubernetes.vanilla import Vanilla as Kubernetes # type: ignore # noqa: E501
from tests.lib.rook.upstream import RookCluster as RookCluster # type: ignore # noqa: E501
logger = logging.getLogger(__name__)
# NOTE(jhesketh): Important! When creating a fixture that uses a context
# manager, the __exit__ method is not called if the object is
# unable to be instantiated. In other words, if __init__ fails
# then no cleanup can occur.
# Therefore, be careful when creating fixtures to not put
# anything particularly time consuming, expensive, or prone to
# failure in the constructor. Instead, move them into a
# separate bootstrapping so that any failures to create
# resources can still be cleaned up.
def _print_config():
logger.info("#"*120)
logger.info("# Rookcheck Settings:")
logger.info("# ===================")
logger.info(f"# ROOKCHECK_CLUSTER_PREFIX={settings.CLUSTER_PREFIX}")
logger.info(f"# ROOKCHECK_WORKSPACE_DIR={settings.WORKSPACE_DIR}")
logger.info(f"# ROOKCHECK_NUMBER_MASTERS={settings.NUMBER_MASTERS}")
logger.info(f"# ROOKCHECK_NUMBER_WORKERS={settings.NUMBER_WORKERS}")
logger.info(
f"# ROOKCHECK_WORKER_INITIAL_DATA_DISKS="
f"{settings.WORKER_INITIAL_DATA_DISKS}")
logger.info(f"# ROOKCHECK_NODE_IMAGE_USER={settings.NODE_IMAGE_USER}")
logger.info(f"# ROOKCHECK__USE_THREADS={settings._USE_THREADS}")
logger.info(f"# ROOKCHECK__REMOVE_WORKSPACE={settings._REMOVE_WORKSPACE}")
logger.info(
f"# ROOKCHECK__TEAR_DOWN_CLUSTER={settings._TEAR_DOWN_CLUSTER}")
logger.info(
f"# ROOKCHECK__TEAR_DOWN_CLUSTER_CONFIRM="
f"{settings._TEAR_DOWN_CLUSTER_CONFIRM}")
logger.info(f"# ROOKCHECK__GATHER_LOGS_DIR={settings._GATHER_LOGS_DIR}")
logger.info(f"# ROOKCHECK_HARDWARE_PROVIDER={settings.HARDWARE_PROVIDER}")
logger.info("# Hardware provider specific config:")
logger.info("# ----------------------------------")
if settings.HARDWARE_PROVIDER.upper() == "OPENSTACK":
logger.info(
f"# ROOKCHECK_OPENSTACK__NODE_IMAGE="
f"{settings.OPENSTACK.NODE_IMAGE}")
logger.info(
f"# ROOKCHECK_OPENSTACK__NODE_SIZE="
f"{settings.OPENSTACK.NODE_SIZE}")
logger.info(
f"# ROOKCHECK_OPENSTACK__EXTERNAL_NETWORK="
f"{settings.OPENSTACK.EXTERNAL_NETWORK}")
elif settings.HARDWARE_PROVIDER.upper() == "LIBVIRT":
logger.info(
f"# ROOKCHECK_LIBVIRT__CONNECTION="
f"{settings.LIBVIRT.CONNECTION}")
logger.info(
f"# ROOKCHECK_LIBVIRT__NETWORK_RANGE="
f"{settings.LIBVIRT.NETWORK_RANGE}")
logger.info(
f"# ROOKCHECK_LIBVIRT__NETWORK_SUBNET="
f"{settings.LIBVIRT.NETWORK_SUBNET}")
logger.info(
f"# ROOKCHECK_LIBVIRT__IMAGE={settings.LIBVIRT.IMAGE}")
logger.info(
f"# ROOKCHECK_LIBVIRT__VM_MEMORY={settings.LIBVIRT.VM_MEMORY}")
elif settings.HARDWARE_PROVIDER.upper() == "AWS_EC2":
logger.info(
f"# ROOKCHECK_AWS.AMI_IMAGE_ID={settings.AWS.AMI_IMAGE_ID}")
logger.info(
f"# ROOKCHECK_AWS.NODE_SIZE={settings.AWS.NODE_SIZE}")
logger.info(f"# ROOKCHECK_DISTRO={settings.DISTRO}")
logger.info("# Distro specific config:")
logger.info("# -----------------------")
if settings.DISTRO == 'SLES_CaaSP':
logger.info(
f"# ROOKCHECK_SES__TARGET={settings.SES.TARGET}")
logger.info(
'# SES Repositories:')
for repo, url in settings(
f'SES.{settings.SES.TARGET}.repositories').items():
logger.info(
f'# - {repo} : {url}')
logger.info(
'# YAML Replacements:')
for key, value in settings(
f'SES.{settings.SES.TARGET}.yaml_substitutions').items():
logger.info(
f'# - {key} = {value}')
elif settings.DISTRO == 'openSUSE_k8s':
logger.info(
f"# ROOKCHECK_UPSTREAM_ROOK__BUILD_ROOK_FROM_GIT="
f"{settings.UPSTREAM_ROOK.BUILD_ROOK_FROM_GIT}")
logger.info("#")
logger.info("# Environment Variables:")
logger.info("# ======================")
for name, value in sorted(os.environ.items()):
logger.info(f"# {name}={value}")
logger.info("#"*120)
def _check_docker_requirement():
logger.debug("Checking if docker is running...")
if settings.DISTRO == 'openSUSE_k8s' and \
converter('@bool', settings.UPSTREAM_ROOK.BUILD_ROOK_FROM_GIT):
rc, out, err = common.execute('docker ps', log_stdout=False)
if rc != 0:
raise Exception("Docker is not running - see manual.")
logger.debug("... Docker appears to be ready")
@pytest.fixture(scope="module")
def preflight_checks():
# Do some checks before starting and print debug information
_print_config()
_check_docker_requirement()
@pytest.fixture(scope="module")
def workspace(preflight_checks):
with Workspace() as workspace:
yield workspace
@pytest.fixture(scope="module")
def hardware(workspace):
# NOTE(jhesketh): The Hardware() object is expected to take care of any
# cloud provider abstraction.
with Hardware(workspace) as hardware:
hardware.boot_nodes(
masters=settings.NUMBER_MASTERS,
workers=settings.NUMBER_WORKERS)
hardware.prepare_nodes()
yield hardware
@pytest.fixture(scope="module")
def kubernetes(workspace, hardware):
# NOTE(jhesketh): We can either choose which Kubernetes class to use here
# or we can have a master class that makes the decision based off the
# config.
# If we implement multiple Kubernetes distributions (eg upstream vs skuba
# etc), we should do them from an ABC so to ensure the interfaces are
# correct.
with Kubernetes(workspace, hardware) as kubernetes:
kubernetes.bootstrap()
kubernetes.install_kubernetes()
yield kubernetes
@pytest.fixture(scope="module")
def linear_rook_cluster(workspace, kubernetes):
# (See above re implementation options)
# This method shows how fixture inheritance can be used to manage the
# infrastructure. It also builds things in order, the below rook_cluster
# fixture is preferred as it will build rook locally in a thread while
# waiting on the infrastructure
with RookCluster(workspace, kubernetes) as rook_cluster:
rook_cluster.build()
rook_cluster.preinstall()
rook_cluster.install()
yield rook_cluster
# TODO
# Need to remove reference to build rook because this won't exist in caasp for
# example
@pytest.fixture(scope="module")
def rook_cluster(workspace):
with Hardware(workspace) as hardware:
with Kubernetes(workspace, hardware) as kubernetes:
with RookCluster(workspace, kubernetes) as rook_cluster:
if settings.as_bool('_USE_THREADS'):
logger.info("Starting rook build in a thread")
build_thread = threading.Thread(
target=rook_cluster.build)
build_thread.start()
# build rook thread
hardware.boot_nodes(masters=settings.NUMBER_MASTERS,
workers=settings.NUMBER_WORKERS)
hardware.prepare_nodes()
kubernetes.bootstrap()
kubernetes.install_kubernetes()
if settings.as_bool('_USE_THREADS'):
logger.info("Re-joining rook build thread")
build_thread.join()
else:
rook_cluster.build()
# NOTE(jhesketh): The upload is very slow.. may want to
# consider how to do this in a thread too but
# is more complex with ansible.
rook_cluster.preinstall()
rook_cluster.install()
yield rook_cluster
|
threadpool.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Cached thread pool, inspired from Pelix/iPOPO Thread Pool
:author: Thomas Calmant
:copyright: Copyright 2019, Thomas Calmant
:license: Apache License 2.0
:version: 0.4.0
..
Copyright 2019 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import threading
try:
# Python 3
# pylint: disable=F0401
import queue
except ImportError:
# Python 2
# pylint: disable=F0401
import Queue as queue
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 4, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
class EventData(object):
"""
A threading event with some associated data
"""
def __init__(self):
"""
Sets up the event
"""
self.__event = threading.Event()
self.__data = None
self.__exception = None
@property
def data(self):
"""
Returns the associated value
"""
return self.__data
@property
def exception(self):
"""
Returns the exception used to stop the wait() method
"""
return self.__exception
def clear(self):
"""
Clears the event
"""
self.__event.clear()
self.__data = None
self.__exception = None
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set()
def raise_exception(self, exception):
"""
Raises an exception in wait()
:param exception: An Exception object
"""
self.__data = None
self.__exception = exception
self.__event.set()
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
:return: True if the event has been set, else False
"""
# wait() returns True if the event was set, False if the timeout expired
result = self.__event.wait(timeout)
# pylint: disable=E0702
# Pylint seems to miss the "is None" check below
if self.__exception is None:
return result
else:
raise self.__exception
class FutureResult(object):
"""
An object to wait for the result of a threaded execution
"""
def __init__(self, logger=None):
"""
Sets up the FutureResult object
:param logger: The Logger to use in case of error (optional)
"""
self._logger = logger or logging.getLogger(__name__)
self._done_event = EventData()
self.__callback = None
self.__extra = None
def __notify(self):
"""
Notify the given callback about the result of the execution
"""
if self.__callback is not None:
try:
self.__callback(self._done_event.data,
self._done_event.exception,
self.__extra)
except Exception as ex:
self._logger.exception("Error calling back method: %s", ex)
def set_callback(self, method, extra=None):
"""
Sets a callback method, called once the result has been computed or in
case of exception.
The callback method must have the following signature:
``callback(result, exception, extra)``.
:param method: The method to call back in the end of the execution
:param extra: Extra parameter to be given to the callback method
"""
self.__callback = method
self.__extra = extra
if self._done_event.is_set():
# The execution has already finished
self.__notify()
def execute(self, method, args, kwargs):
"""
Execute the given method and stores its result.
The result is considered "done" even if the method raises an exception
:param method: The method to execute
:param args: Method positional arguments
:param kwargs: Method keyword arguments
:raise Exception: The exception raised by the method
"""
# Normalize arguments
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
# Call the method
result = method(*args, **kwargs)
except Exception as ex:
# Something went wrong: propagate to the event and to the caller
self._done_event.raise_exception(ex)
raise
else:
# Store the result
self._done_event.set(result)
finally:
# In any case: notify the call back (if any)
self.__notify()
def done(self):
"""
Returns True if the job has finished, else False
"""
return self._done_event.is_set()
def result(self, timeout=None):
"""
Waits up to timeout for the result the threaded job.
Returns immediately the result if the job has already been done.
:param timeout: The maximum time to wait for a result (in seconds)
:raise OSError: The timeout expired before the job finished
:raise Exception: The exception encountered during the call, if any
"""
if self._done_event.wait(timeout):
return self._done_event.data
else:
raise OSError("Timeout raised")
# ------------------------------------------------------------------------------
class ThreadPool(object):
"""
Executes the tasks stored in a FIFO in a thread pool
"""
def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
logname=None):
"""
Sets up the thread pool.
Threads are kept alive 60 seconds (timeout argument).
:param max_threads: Maximum size of the thread pool
:param min_threads: Minimum size of the thread pool
:param queue_size: Size of the task queue (0 for infinite)
:param timeout: Queue timeout (in seconds, 60s by default)
:param logname: Name of the logger
:raise ValueError: Invalid number of threads
"""
# Validate parameters
try:
max_threads = int(max_threads)
if max_threads < 1:
raise ValueError("Pool size must be greater than 0")
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
try:
min_threads = int(min_threads)
if min_threads < 0:
min_threads = 0
elif min_threads > max_threads:
min_threads = max_threads
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
# The logger
self._logger = logging.getLogger(logname or __name__)
# The loop control event
self._done_event = threading.Event()
self._done_event.set()
# The task queue
try:
queue_size = int(queue_size)
except (TypeError, ValueError):
# Not a valid integer
queue_size = 0
self._queue = queue.Queue(queue_size)
self._timeout = timeout
self.__lock = threading.RLock()
# The thread pool
self._min_threads = min_threads
self._max_threads = max_threads
self._threads = []
# Thread count
self._thread_id = 0
# Current number of threads, active and alive,
# and number of task waiting
self.__nb_threads = 0
self.__nb_active_threads = 0
self.__nb_pending_task = 0
def start(self):
"""
Starts the thread pool. Does nothing if the pool is already started.
"""
if not self._done_event.is_set():
# Stop event not set: we're running
return
# Clear the stop event
self._done_event.clear()
# Compute the number of threads to start to handle pending tasks
nb_pending_tasks = self._queue.qsize()
if nb_pending_tasks > self._max_threads:
nb_threads = self._max_threads
nb_pending_tasks = self._max_threads
elif nb_pending_tasks < self._min_threads:
nb_threads = self._min_threads
else:
nb_threads = nb_pending_tasks
# Create the threads
for _ in range(nb_pending_tasks):
self.__nb_pending_task += 1
self.__start_thread()
for _ in range(nb_threads-nb_pending_tasks):
self.__start_thread()
def __start_thread(self):
"""
Starts a new thread, if possible
"""
with self.__lock:
if self.__nb_threads >= self._max_threads:
# Can't create more threads
return False
if self._done_event.is_set():
# We're stopped: do nothing
return False
# Prepare thread and start it
name = "{0}-{1}".format(self._logger.name, self._thread_id)
self._thread_id += 1
thread = threading.Thread(target=self.__run, name=name)
thread.daemon = True
try:
self.__nb_threads += 1
thread.start()
self._threads.append(thread)
return True
except (RuntimeError, OSError):
self.__nb_threads -= 1
return False
def stop(self):
"""
Stops the thread pool. Does nothing if the pool is already stopped.
"""
if self._done_event.is_set():
# Stop event set: we're stopped
return
# Set the stop event
self._done_event.set()
with self.__lock:
# Add something in the queue (to unlock the join())
try:
for _ in self._threads:
self._queue.put(self._done_event, True, self._timeout)
except queue.Full:
# There is already something in the queue
pass
# Copy the list of threads to wait for
threads = self._threads[:]
# Join threads outside the lock
for thread in threads:
while thread.is_alive():
# Wait 3 seconds
thread.join(3)
if thread.is_alive():
# Thread is still alive: something might be wrong
self._logger.warning("Thread %s is still alive...",
thread.name)
# Clear storage
del self._threads[:]
self.clear()
def enqueue(self, method, *args, **kwargs):
"""
Queues a task in the pool
:param method: Method to call
:return: A FutureResult object, to get the result of the task
:raise ValueError: Invalid method
:raise Full: The task queue is full
"""
if not hasattr(method, '__call__'):
raise ValueError("{0} has no __call__ member."
.format(method.__name__))
# Prepare the future result object
future = FutureResult(self._logger)
# Use a lock, as we might be "resetting" the queue
with self.__lock:
# Add the task to the queue
self._queue.put((method, args, kwargs, future), True,
self._timeout)
self.__nb_pending_task += 1
if self.__nb_pending_task > self.__nb_threads:
# All threads are taken: start a new one
self.__start_thread()
return future
def clear(self):
"""
Empties the current queue content.
Returns once the queue has been emptied.
"""
with self.__lock:
# Empty the current queue
try:
while True:
self._queue.get_nowait()
self._queue.task_done()
except queue.Empty:
# Queue is now empty
pass
# Wait for the tasks currently executed
self.join()
def join(self, timeout=None):
"""
Waits for all the tasks to be executed
:param timeout: Maximum time to wait (in seconds)
:return: True if the queue has been emptied, else False
"""
if self._queue.empty():
# Nothing to wait for...
return True
elif timeout is None:
# Use the original join
self._queue.join()
return True
else:
# Wait for the condition
with self._queue.all_tasks_done:
self._queue.all_tasks_done.wait(timeout)
return not bool(self._queue.unfinished_tasks)
def __run(self):
"""
The main loop
"""
already_cleaned = False
try:
while not self._done_event.is_set():
try:
# Wait for an action (blocking)
task = self._queue.get(True, self._timeout)
if task is self._done_event:
# Stop event in the queue: get out
self._queue.task_done()
return
except queue.Empty:
# Nothing to do yet
pass
else:
with self.__lock:
self.__nb_active_threads += 1
# Extract elements
method, args, kwargs, future = task
try:
# Call the method
future.execute(method, args, kwargs)
except Exception as ex:
self._logger.exception("Error executing %s: %s",
method.__name__, ex)
finally:
# Mark the action as executed
self._queue.task_done()
# Thread is not active anymore
with self.__lock:
self.__nb_pending_task -= 1
self.__nb_active_threads -= 1
# Clean up thread if necessary
with self.__lock:
extra_threads = self.__nb_threads - self.__nb_active_threads
if self.__nb_threads > self._min_threads \
and extra_threads > self._queue.qsize():
# No more work for this thread
# if there are more non active_thread than task
# and we're above the minimum number of threads:
# stop this one
self.__nb_threads -= 1
# To avoid a race condition: decrease the number of
# threads here and mark it as already accounted for
already_cleaned = True
return
finally:
# Always clean up
with self.__lock:
# Thread stops: clean up references
try:
self._threads.remove(threading.current_thread())
except ValueError:
pass
if not already_cleaned:
self.__nb_threads -= 1
|
coach.py
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import copy
from rl_coach.core_types import EnvironmentSteps
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import atexit
import time
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters
from multiprocessing import Process
from multiprocessing.managers import BaseManager
import subprocess
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, get_base_dir
from rl_coach.agents.human_agent import HumanAgentParameters
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def get_graph_manager_from_args(args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user
:param args: the arguments given by the user
:return: the updated graph manager
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
# load custom parameters into run_dict
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
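# Illustrative sketch (standalone helper, not part of Coach): how the
# --custom_parameter string handled above is split into key/value pairs before
# each pair is applied to the graph manager with exec().
def _parse_custom_parameters(raw):
    pairs = [pair.split('=') for pair in raw.split(';')]
    return [(pair[0].strip(), pair[1].strip()) for pair in pairs if len(pair) == 2]
# _parse_custom_parameters("visualization.render=False; num_training_iterations=500")
# -> [('visualization.render', 'False'), ('num_training_iterations', '500')]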
def parse_arguments(parser: argparse.ArgumentParser) -> argparse.Namespace:
"""
Parse the arguments that the user entered
:param parser: the argparse command line parser
:return: the parsed arguments
"""
args = parser.parse_args()
# if no arg is given
if len(sys.argv) == 1:
parser.print_help()
exit(0)
# list available presets
preset_names = list_all_presets()
if args.list:
screen.log_title("Available Presets:")
for preset in sorted(preset_names):
print(preset)
sys.exit(0)
# replace a short preset name with the full path
if args.preset is not None:
if args.preset.lower() in [p.lower() for p in preset_names]:
args.preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', args.preset))
else:
args.preset = "{}".format(args.preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(args.preset.split(":")) == 1:
args.preset += ":graph_manager"
# verify that the preset exists
preset_path = args.preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(args.preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(args.preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(args.preset))
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
screen.error("The requested checkpoint folder to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play:
if args.environment_type:
args.agent_type = 'Human'
else:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.save_checkpoint_dir = os.path.join(args.experiment_path, 'checkpoint') if args.save_checkpoint_secs is not None else None
return args
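# Illustrative sketch (standalone, mirrors the normalization in parse_arguments
# above; the argument names are only for this example): a short preset name is
# expanded to "<base_dir>/presets/<name>.py:graph_manager" before import.
def _normalize_preset_name(preset, base_dir, known_presets):
    if preset.lower() in [p.lower() for p in known_presets]:
        preset = "{}.py".format(os.path.join(base_dir, 'presets', preset))
    if len(preset.split(":")) == 1:
        preset += ":graph_manager"
    return preset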
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
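# Illustrative check (not part of the original file): keys already present in
# target_dict take precedence over the ones coming from source_dict.
def _add_items_to_dict_example():
    merged = add_items_to_dict({'seed': 1}, {'seed': 0, 'use_cpu': True})
    assert merged == {'seed': 1, 'use_cpu': True}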
def open_dashboard(experiment_path):
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="/bin/bash")
subprocess.Popen(cmd, shell=True, executable="/bin/bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only:
graph_manager.evaluate(EnvironmentSteps(sys.maxsize), keep_networks_in_sync=True)
else:
graph_manager.improve()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default='',
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(int) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(flag) Run evaluation only. This is a convenient way to disable "
"training in order to evaluate an existing checkpoint.",
action='store_true')
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('-s', '--save_checkpoint_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-at', '--agent_type',
help="(string) Choose an agent type class to override on top of the selected preset. "
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type",
default=None,
type=str)
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset."
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type",
default=None,
type=str)
parser.add_argument('-ept', '--exploration_policy_type',
help="(string) Choose an exploration policy type class to override on top of the selected "
"preset."
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type"
,
default=None,
type=str)
parser.add_argument('-lvl', '--level',
help="(string) Choose the level that will be played in the environment that was selected."
"This value will override the level parameter in the environment class."
,
default=None,
type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_parameters',
help="(flag) Print tuning_parameters to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
args = parse_arguments(parser)
graph_manager = get_graph_manager_from_args(args)
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
# Single-threaded runs
if args.num_workers == 1:
# Start the training or evaluation
        task_parameters = TaskParameters(framework_type="tensorflow", # TODO: tensorflow shouldn't be hardcoded
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu)
task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
# Multi-threaded runs
else:
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
ps_hosts = "localhost:{}".format(get_open_port())
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
# Shared memory
class CommManager(BaseManager):
pass
CommManager.register('SharedMemoryScratchPad', SharedMemoryScratchPad, exposed=['add', 'get', 'internal_call'])
comm_manager = CommManager()
comm_manager.start()
shared_memory_scratchpad = comm_manager.SharedMemoryScratchPad()
def start_distributed_task(job_type, task_index, evaluation_worker=False,
shared_memory_scratchpad=shared_memory_scratchpad):
            task_parameters = DistributedTaskParameters(framework_type="tensorflow", # TODO: tensorflow shouldn't be hardcoded
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=evaluation_worker,
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=shared_memory_scratchpad,
seed=args.seed+task_index if args.seed is not None else None) # each worker gets a different seed
task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
p = Process(target=start_graph, args=(graph_manager, task_parameters))
# p.daemon = True
p.start()
return p
# parameter server
parameter_server = start_distributed_task("ps", 0)
# training workers
        # wait a bit before spawning the non-chief workers in order to make sure the session is already created
workers = []
workers.append(start_distributed_task("worker", 0))
time.sleep(2)
for task_index in range(1, args.num_workers):
workers.append(start_distributed_task("worker", task_index))
# evaluation worker
if args.evaluation_worker:
evaluation_worker = start_distributed_task("worker", args.num_workers, evaluation_worker=True)
# wait for all workers
[w.join() for w in workers]
if args.evaluation_worker:
evaluation_worker.terminate()
if __name__ == "__main__":
main()
|
installwizard.py
|
import sys
import threading
import os
import traceback
from typing import Tuple, List
from PyQt5.QtCore import *
from qtum_electrum.wallet import Wallet, Abstract_Wallet
from qtum_electrum.storage import WalletStorage
from qtum_electrum.util import UserCancelled, InvalidPassword, WalletFileException
from qtum_electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from qtum_electrum.i18n import _
from qtum_electrum.plugin import run_hook
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n' \
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n' \
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> QDckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> MNhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> qc1q3fjfk...')
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
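            # Qt's drawPie() expects its start and span angles in 1/16ths of a
            # degree, hence the factor of 16 below.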
            alpha = int(16 * 360 * i / self.n)
            alpha2 = int(16 * 360 * 1 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
BaseWizard.__init__(self, config, plugins)
QDialog.__init__(self, None)
self.setWindowTitle('Qtum Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
        self.refresh_gui() # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum wallet'))
self.temp_storage = WalletStorage(path)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.temp_storage = None
self.next_button.setEnabled(False)
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, self.temp_storage if self.temp_storage.file_exists() else None
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename)).scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(360)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
title = _('Enter Seed')
options = []
if 'mobile' == self.wallet_type:
message = ''.join([
_('Please enter your seed phrase in order to restore your wallet. \n'),
_('This is compatible with qtum mobile wallet. \n')])
else:
if self.opt_ext:
options.append('ext')
# if self.opt_bip39:
# options.append('bip39')
message = ''.join([
_('Please enter your seed phrase in order to restore your wallet. \n')])
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
self.please_wait.setText(msg)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0 / 60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
line.repaint()
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(5)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
# message = _("Electrum communicates with remote servers to get "
# "information about your transactions and addresses. The "
# "servers all fulfill the same purpose only differing in "
# "hardware. In most cases you simply want to let Electrum "
# "pick one at random. However if you prefer feel free to "
# "select a server manually.")
# choices = [_("Auto connect"), _("Select server manually")]
# title = _("How do you want to connect to a server? ")
# clayout = ChoicesLayout(message, choices)
# self.back_button.setText(_('Cancel'))
# self.exec_layout(clayout.layout(), title)
# r = clayout.selected_index()
# if r == 1:
# nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
# if self.exec_layout(nlayout.layout()):
# nlayout.accept()
# else:
# network.auto_connect = True
# self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Sheet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test sheetd shutdown."""
from threading import Thread
from test_framework.test_framework import SheetTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
def test_long_call(node):
block = node.waitfornewblock(5000)
assert_equal(block['height'], 0)
class ShutdownTest(SheetTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coverage_dir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
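        # (the getrpcinfo call itself also counts as an active command, which is
        # why the expected count below is 2)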
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2, err_msg="wait until getrpcinfo active commands")
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0) #, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
favorites.py
|
import datetime
import logging
import re
import threading
from telegram import ForceReply
from telegram import InlineKeyboardButton
from telegram import InlineKeyboardMarkup, ReplyKeyboardMarkup
from telegram.ext import ConversationHandler
import captions
import mdformat
import const
import settings
import util
from dialog import messages
from layouts import Layouts
from models import Bot
from models import Favorite
from models import Statistic
from models import User
from const import CallbackActions, DeepLinkingActions
from models import track_activity
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
def add_favorite_handler(bot, update, args=None):
uid = util.uid_from_update(update)
from components.basic import main_menu_buttons
main_menu_markup = ReplyKeyboardMarkup(main_menu_buttons(uid in settings.MODERATORS))
if args:
query = ' '.join(args) if isinstance(args, list) else args
try:
# TODO: add multiple
username = re.match(settings.REGEX_BOT_IN_TEXT, query).groups()[0]
try:
# TODO: get exact database matches for input without `@`
item = Bot.by_username(username, include_disabled=True)
return add_favorite(bot, update, item)
except Bot.DoesNotExist:
buttons = [
InlineKeyboardButton(
"Yai!", callback_data=util.callback_for_action(CallbackActions.ADD_ANYWAY, {'u': username})),
InlineKeyboardButton("Nay...", callback_data=util.callback_for_action(CallbackActions.ADD_FAVORITE))
]
reply_markup = InlineKeyboardMarkup([buttons])
util.send_md_message(bot, uid,
"{} is not in the @BotList. Do you want to add it to your {} anyway?".format(
username, captions.FAVORITES),
reply_markup=reply_markup)
except AttributeError:
# invalid bot username
# TODO when does this happen?
update.message.reply_text(
util.failure("Sorry, but that is not a valid username. Please try again. /addfav"))
else:
buttons = [
InlineKeyboardButton("Search inline", switch_inline_query_current_chat='')
]
reply_markup = InlineKeyboardMarkup([buttons])
bot.sendMessage(uid, messages.ADD_FAVORITE, reply_markup=ForceReply(selective=True))
return ConversationHandler.END
def add_favorite(bot, update, item: Bot, callback_alert=None):
user = User.from_update(update)
uid = util.uid_from_update(update)
mid = util.mid_from_update(update)
from components.basic import main_menu_buttons
main_menu_markup = ReplyKeyboardMarkup(main_menu_buttons(uid in settings.MODERATORS))
fav, created = Favorite.add(user=user, item=item)
if created:
Statistic.of(user, 'add-favorite', item.username)
text = mdformat.love("{} added to your {}favorites.".format(fav.bot, '' if callback_alert else '/'))
if callback_alert:
update.callback_query.answer(text=text, show_alert=False)
else:
msg = util.send_md_message(bot, uid, text, to_edit=mid, reply_markup=main_menu_markup)
mid = msg.message_id
util.wait(bot, update)
send_favorites_list(bot, update, to_edit=mid)
else:
text = mdformat.none_action(
"{} is already a favorite of yours.{}".format(fav.bot, '' if callback_alert else ' /favorites'))
if callback_alert:
update.callback_query.answer(text=text, show_alert=False)
else:
util.send_md_message(bot, uid, text, reply_markup=main_menu_markup)
return ConversationHandler.END
@track_activity('view-favorites', level=Statistic.ANALYSIS)
def send_favorites_list(bot, update, to_edit=None):
uid = util.uid_from_update(update)
user = User.from_update(update)
t = threading.Thread(target=_too_many_favorites_handler, args=(bot, update, user))
t.start()
favorites = Favorite.select_all(user)
buttons = [
[
InlineKeyboardButton(captions.ADD_FAVORITE,
callback_data=util.callback_for_action(CallbackActions.ADD_FAVORITE)),
InlineKeyboardButton(captions.REMOVE_FAVORITE,
callback_data=util.callback_for_action(CallbackActions.REMOVE_FAVORITE_MENU))
],
[
InlineKeyboardButton('Layout: ' + Layouts.get_caption(user.favorites_layout),
callback_data=util.callback_for_action(
CallbackActions.TOGGLE_FAVORITES_LAYOUT,
{'v': Layouts.get_next(user.favorites_layout)})),
],
[
InlineKeyboardButton(captions.SHARE, switch_inline_query=DeepLinkingActions.FAVORITES),
]
]
reply_markup = InlineKeyboardMarkup(buttons)
if to_edit is None:
to_edit = util.mid_from_update(update)
if len(favorites) == 0:
text = "You have no favorites yet."
else:
text = _favorites_categories_md(favorites, user.favorites_layout)
bot.formatter.send_or_edit(uid, text,
to_edit=to_edit, reply_markup=reply_markup)
@track_activity('toggled their favorites layout', level=Statistic.ANALYSIS)
def toggle_favorites_layout(bot, update, value):
uid = util.uid_from_update(update)
user = User.from_update(update)
user.favorites_layout = value
user.save()
send_favorites_list(bot, update)
def _favorites_categories_md(favorites, layout=None):
text = messages.FAVORITES_HEADLINE + '\n'
chunks = list()
if layout == 'single':
# text += '\n'
favorites.sort(key=lambda x: x.bot.username)
bots = [f.bot for f in favorites]
total = len(bots) - 1
for n, bot in enumerate(bots):
if n < total:
list_icon = '├'
else:
list_icon = '└'
chunks.append('{} {}'.format(list_icon, str(bot)))
all_favorites = '\n'.join(chunks)
text += all_favorites
else:
# sort favorites by database order
favorites.sort(key=lambda x: x.bot.category.order if x.bot.category else x.bot.username)
current_category = None
for n, f in enumerate(favorites):
bot = f.bot
category = bot.category
try:
if favorites[n + 1].bot.category != category:
list_icon = '└'
else:
list_icon = '├'
except IndexError:
list_icon = '└'
if current_category is None or category != current_category:
category_no_bulletin = str(category)[1:]
chunks.append('\n*{}*'.format(category_no_bulletin))
chunks.append('{} {}'.format(list_icon, str(bot)))
current_category = category
all_favorites = '\n'.join(chunks)
text += all_favorites
return text
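# Illustrative sketch (standalone helper, not part of the bot): the tree-style
# prefixes used above, '├' for every entry except the last one and '└' for the
# last entry.
def _tree_lines(items):
    lines = []
    for n, item in enumerate(items):
        icon = '└' if n == len(items) - 1 else '├'
        lines.append('{} {}'.format(icon, item))
    return '\n'.join(lines)
# _tree_lines(['@foo_bot', '@bar_bot', '@baz_bot']) renders as:
# ├ @foo_bot
# ├ @bar_bot
# └ @baz_bot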
@track_activity('menu', 'remove favorite', Statistic.DETAILED)
def remove_favorite_menu(bot, update):
uid = util.uid_from_update(update)
user = User.from_update(update)
favorites = Favorite.select_all(user)
fav_remove_buttons = [InlineKeyboardButton(
'✖️ {}'.format(str(f.bot.username)),
callback_data=util.callback_for_action(CallbackActions.REMOVE_FAVORITE, {'id': f.id}))
for f in favorites]
buttons = util.build_menu(fav_remove_buttons, 2, header_buttons=[
InlineKeyboardButton(captions.DONE,
callback_data=util.callback_for_action(CallbackActions.SEND_FAVORITES_LIST))
])
reply_markup = InlineKeyboardMarkup(buttons)
bot.formatter.send_or_edit(uid, util.action_hint("Select favorites to remove"),
to_edit=util.mid_from_update(update),
reply_markup=reply_markup)
def _too_many_favorites_handler(bot, update, user):
uid = util.uid_from_update(update)
any_removed = False
while too_many_favorites(user):
oldest = Favorite.get_oldest(user)
oldest.delete_instance()
any_removed = True
Statistic.of(update, 'had to lose a favorite because HE HAD TOO FUCKIN MANY 😬')
if any_removed:
txt = "You have too many favorites, _they do not fit into a single message_. That's why I removed your " \
"oldest bot, *{}*, from your list of favorites.".format(oldest.bot if oldest.bot else oldest.custom_bot)
util.send_md_message(bot, uid, txt)
def too_many_favorites(user):
favs = Favorite.select_all(user)
promo = max(len(messages.PROMOTION_MESSAGE), len(messages.FAVORITES_HEADLINE))
message_length = len(_favorites_categories_md(favs)) + promo + 4
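    # Telegram caps a single bot message at 4096 characters.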
return message_length > 4096
def add_custom(bot, update, username):
uid = util.uid_from_update(update)
user = User.from_update(update)
mid = util.mid_from_update(update)
from components.basic import main_menu_buttons
main_menu_markup = ReplyKeyboardMarkup(main_menu_buttons(uid in settings.MODERATORS))
try:
fav = Favorite.get(custom_bot=username)
util.send_or_edit_md_message(
bot, uid, mdformat.none_action(
"{} is already a favorite of yours. /favorites".format(fav.custom_bot)),
to_edit=mid,
reply_markup=main_menu_markup)
except Favorite.DoesNotExist:
fav = Favorite(user=user, custom_bot=username, date_added=datetime.date.today())
fav.save()
msg = bot.formatter.send_or_edit(uid,
mdformat.love("{} added to your /favorites.".format(fav.custom_bot)),
to_edit=mid)
mid = msg.message_id
util.wait(bot, update)
send_favorites_list(bot, update, to_edit=mid)
return ConversationHandler.END
|
process_control.py
|
# TODO more comprehensive tests
from __future__ import division
from __future__ import absolute_import # XXX is this necessary?
from wx.lib.agw import pyprogress
import wx
from libtbx import thread_utils
from libtbx import runtime_utils
from libtbx import easy_pickle
from libtbx import easy_run
from libtbx.utils import Sorry, Abort, download_progress
import threading
import random
import locale
import math
import os
JOB_START_ID = wx.NewId()
LOG_UPDATE_ID = wx.NewId()
CALLBACK_ID = wx.NewId()
JOB_EXCEPTION_ID = wx.NewId()
JOB_KILLED_ID = wx.NewId()
JOB_COMPLETE_ID = wx.NewId()
JOB_PAUSE_ID = wx.NewId()
JOB_RESUME_ID = wx.NewId()
DOWNLOAD_COMPLETE_ID = wx.NewId()
DOWNLOAD_INCREMENT_ID = wx.NewId()
class SubprocessEvent(wx.PyEvent):
event_id = None
def __init__(self, data, **kwds):
self.data = data
self.__dict__.update(kwds)
wx.PyEvent.__init__(self)
self.SetEventType(self.event_id)
class JobStartEvent(SubprocessEvent):
event_id = JOB_START_ID
class LogEvent(SubprocessEvent):
event_id = LOG_UPDATE_ID
class JobExceptionEvent(SubprocessEvent):
event_id = JOB_EXCEPTION_ID
class JobKilledEvent(SubprocessEvent):
event_id = JOB_KILLED_ID
class JobCompleteEvent(SubprocessEvent):
event_id = JOB_COMPLETE_ID
class CallbackEvent(SubprocessEvent):
event_id = CALLBACK_ID
class JobPauseEvent(SubprocessEvent):
event_id = JOB_PAUSE_ID
class JobResumeEvent(SubprocessEvent):
event_id = JOB_RESUME_ID
class DownloadCompleteEvent(SubprocessEvent):
event_id = DOWNLOAD_COMPLETE_ID
class DownloadIncrementEvent(SubprocessEvent):
event_id = DOWNLOAD_INCREMENT_ID
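# Each SubprocessEvent subclass above is bound to its own wx event type id;
# windows opt in with window.Connect(-1, -1, <EVENT_ID>, handler) and worker
# threads hand instances back to the GUI thread with wx.PostEvent(window, event),
# as the helpers and the event_agent class below do.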
def setup_stdout_logging_event(window, OnPrint):
window.Connect(-1, -1, LOG_UPDATE_ID, OnPrint)
def setup_process_gui_events(
window,
OnStart=None,
OnPrint=None,
OnUpdate=None,
OnExcept=None,
OnAbort=None,
OnComplete=None,
OnPause=None,
OnResume=None):
if OnStart is not None :
assert hasattr(OnStart, "__call__")
window.Connect(-1, -1, JOB_START_ID, OnStart)
if OnPrint is not None :
assert hasattr(OnPrint, "__call__")
window.Connect(-1, -1, LOG_UPDATE_ID, OnPrint)
if OnUpdate is not None :
assert hasattr(OnUpdate, "__call__")
window.Connect(-1, -1, CALLBACK_ID, OnUpdate)
if OnExcept is not None :
assert hasattr(OnExcept, "__call__")
window.Connect(-1, -1, JOB_EXCEPTION_ID, OnExcept)
if OnAbort is not None :
assert hasattr(OnAbort, "__call__")
window.Connect(-1, -1, JOB_KILLED_ID, OnAbort)
if OnComplete is not None :
assert hasattr(OnComplete, "__call__")
window.Connect(-1, -1, JOB_COMPLETE_ID, OnComplete)
if OnPause is not None :
assert hasattr(OnPause, "__call__")
window.Connect(-1, -1, JOB_PAUSE_ID, OnPause)
if OnResume is not None :
assert hasattr(OnResume, "__call__")
window.Connect(-1, -1, JOB_RESUME_ID, OnResume)
class event_agent(object):
def __init__(self, window, **kwds):
self.window = window
self._kwds = dict(kwds)
self.__dict__.update(kwds)
def get_kwds(self):
return self._kwds
def callback_start(self, data):
kwds = self.get_kwds()
event = JobStartEvent(data, **kwds)
wx.PostEvent(self.window, event)
def callback_stdout(self, data):
kwds = self.get_kwds()
event = LogEvent(data, **kwds)
wx.PostEvent(self.window, event)
def callback_error(self, error, traceback_info):
kwds = self.get_kwds()
event = JobExceptionEvent((error, traceback_info), **kwds)
wx.PostEvent(self.window, event)
def callback_abort(self):
kwds = self.get_kwds()
event = JobKilledEvent(None, **kwds)
wx.PostEvent(self.window, event)
def callback_final(self, result):
kwds = self.get_kwds()
event = JobCompleteEvent(result, **kwds)
wx.PostEvent(self.window, event)
def callback_other(self, data):
kwds = self.get_kwds()
event = CallbackEvent(data, **kwds)
wx.PostEvent(self.window, event)
def callback_pause(self):
kwds = self.get_kwds()
event = JobPauseEvent(None, **kwds)
wx.PostEvent(self.window, event)
def callback_resume(self):
kwds = self.get_kwds()
event = JobResumeEvent(None, **kwds)
wx.PostEvent(self.window, event)
# simplified for when the window is really the app object
class background_event_agent(event_agent):
def callback_stdout(self, data):
pass
def callback_other(self, data):
pass
class detached_process(runtime_utils.detached_process_client):
def __init__(self, params, proxy):
runtime_utils.detached_process_client.__init__(self, params)
self.proxy = proxy
def callback_start(self, data):
self.proxy.callback_start(data)
def callback_stdout(self, data):
self.proxy.callback_stdout(data)
def callback_other(self, data):
self.proxy.callback_other(data)
def callback_abort(self):
self.proxy.callback_abort()
def callback_final(self, result):
self.proxy.callback_final(result)
def callback_error(self, error, traceback_info):
self.proxy.callback_error(error, traceback_info)
def callback_pause(self):
self.proxy.callback_pause()
def callback_resume(self):
self.proxy.callback_resume()
def start(self):
pass
# this just adds event posting callbacks to the original class
class process_with_gui_callbacks(thread_utils.process_with_callbacks):
def __init__(self, proxy, target, args=(), kwargs={}, buffer_stdout=True):
thread_utils.process_with_callbacks.__init__(self,
target = target,
args=args,
kwargs=kwargs,
callback_stdout = proxy.callback_stdout,
callback_final = proxy.callback_final,
callback_err = proxy.callback_error,
callback_abort = proxy.callback_abort,
callback_other = proxy.callback_other,
callback_pause = proxy.callback_pause,
callback_resume = proxy.callback_resume,
buffer_stdout = buffer_stdout)
def set_job(self, job):
pass
def purge_files(self):
pass
class simple_gui_process(process_with_gui_callbacks):
def __init__(self, window, target, args=(), kwargs={}):
# XXX fix for phenix gui - is this necessary?
proxy = event_agent(window, project_id=None, job_id=None)
process_with_gui_callbacks.__init__(self,
proxy=proxy,
target=target,
args=args,
kwargs=kwargs,
buffer_stdout=True)
class ThreadProgressDialog(pyprogress.PyProgress):
def __init__(self, parent, title, message):
pyprogress.PyProgress.__init__(self, parent, -1, title, message,
agwStyle=wx.PD_ELAPSED_TIME|wx.PD_APP_MODAL)
self.SetGaugeProportion(0.15)
self.SetGaugeSteps(50)
self.SetGaugeBackground(wx.Colour(235, 235, 235))
self.SetFirstGradientColour(wx.Colour(235,235,235))
self.SetSecondGradientColour(wx.Colour(120, 200, 255))
class download_file_basic(object):
def __init__(self, window, dl_func, args):
assert isinstance(window, wx.EvtHandler)
assert hasattr(dl_func, "__call__")
assert (isinstance(args, list) or isinstance(args, tuple))
self.window = window
window.Connect(-1, -1, DOWNLOAD_COMPLETE_ID, self.OnComplete)
self.dl_func = dl_func
self.args = args
self.t = threading.Thread(target=self.run)
self.t.start()
def run(self):
try :
result = self.dl_func(self.args)
except Exception, e :
result = (None, str(e))
finally :
wx.PostEvent(self.window, DownloadCompleteEvent(result))
return result
def OnComplete(self, event):
if isinstance(event.data, basestring):
wx.MessageBox(message="File downloaded to %s" % event.data)
else :
wx.MessageBox(message="Error downloading file: %s" % event.data[1],
caption="Download error", style=wx.ICON_ERROR)
self.t.join()
class DownloadProgressDialog(wx.ProgressDialog, download_progress):
"""
Dialog for displaying download progress. The actual download (not
implemented here) should be run in a separate thread, with a reasonable
chunk size, and call download_progress.increment() as each new chunk is
downloaded.
"""
def __init__(self, parent, title, message):
download_progress.__init__(self)
wx.ProgressDialog.__init__(self, parent=parent,
title=title,
message=message,
style=wx.PD_ELAPSED_TIME|wx.PD_CAN_ABORT|wx.PD_AUTO_HIDE,
maximum=100)
self.Connect(-1, -1, DOWNLOAD_INCREMENT_ID, self.OnIncrement)
self.Connect(-1, -1, DOWNLOAD_COMPLETE_ID, self.OnComplete)
self._continue = True
def show_progress(self):
if (not self._continue):
return False
locale.setlocale(locale.LC_ALL, 'en_US')
pct = self.percent_finished()
msg = "%s/%s KB downloaded" % (
locale.format("%d", self.n_kb_elapsed, grouping=True),
locale.format("%d", self.n_kb_total, grouping=True))
evt = DownloadIncrementEvent(data=(pct, msg))
wx.PostEvent(self, evt)
return self._continue
def OnIncrement(self, event):
(cont, skip) = self.Update(value=event.data[0], newmsg=event.data[1])
self._continue = cont
def OnComplete(self, event):
self.Hide()
self.Close()
    # FIXME destroying the dialog crashes wxPython 2.9.5/osx-cocoa
def complete(self):
evt = DownloadCompleteEvent(data=None)
wx.PostEvent(self, evt)
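# Illustrative sketch (standalone; 'response' is any file-like object and
# 'report' stands in for a progress callback such as the increment() call
# mentioned in the docstring above): the chunked download loop the dialog
# expects to run in a separate thread.
def _download_in_chunks(response, out_file, report, chunk_size=64 * 1024):
  while True:
    chunk = response.read(chunk_size)
    if not chunk:
      break
    out_file.write(chunk)
    report(len(chunk))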
class BackgroundDownloadDialog(pyprogress.PyProgress, download_progress):
"""
Placeholder for downloads which block the child thread; will pulse
continuously but not show changing status.
"""
def __init__(self, parent, title, message):
download_progress.__init__(self)
pyprogress.PyProgress.__init__(self, parent, -1, title, message,
agwStyle=wx.PD_ELAPSED_TIME|wx.PD_CAN_ABORT|wx.PD_AUTO_HIDE)
self.SetGaugeProportion(0.15)
self.SetGaugeSteps(100)
self.SetGaugeBackground(wx.Colour(235, 235, 235))
self.SetFirstGradientColour(wx.Colour(235,235,235))
self.SetSecondGradientColour(wx.Colour(120, 200, 255))
self.Connect(-1, -1, DOWNLOAD_COMPLETE_ID, self.OnComplete)
self._continue = True
def show_progress(self):
if (not self._continue):
return False
return self._continue
def OnComplete(self, event):
self.Hide()
self.Close()
def complete(self):
evt = DownloadCompleteEvent(data=None)
wx.PostEvent(self, evt)
def run_function_as_thread_in_dialog(parent, thread_function, title, message):
dlg = ThreadProgressDialog(None, title, message)
t = thread_utils.simple_task_thread(thread_function, dlg)
t.start()
while True :
if t.is_complete() or t.exception_raised():
#dlg.Destroy()
dlg.Hide()
break
else :
dlg.UpdatePulse()
wx.MilliSleep(30)
dlg.Destroy()
wx.SafeYield()
if t.exception_raised():
raise RuntimeError("An exception occurred while running this process: %s" %
t.get_error())
return t.return_value
# TODO
class ProcessDialog(wx.Dialog):
def __init__(self, parent, message, caption, callback=None):
wx.Dialog.__init__(self,
parent=parent,
title=caption,
style=wx.RAISED_BORDER|wx.CAPTION)
self.callback = callback
self.process = None
self._error = None
self._aborted = False
szr = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(szr)
szr2 = wx.BoxSizer(wx.VERTICAL)
szr.Add(szr2, 1, wx.ALL, 5)
msg_txt = wx.StaticText(self, -1, message)
msg_txt.Wrap(400)
szr2.Add(msg_txt, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5)
self.gauge = wx.Gauge(parent=self, size=(300,-1))
self.gauge.SetRange(100)
szr2.Add(self.gauge, 1, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5)
abort_btn = wx.Button(parent=self,
label="Abort")
self.Bind(wx.EVT_BUTTON, self.OnAbort, abort_btn)
szr2.Add(abort_btn, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5)
self.SetMinSize((300,100))
szr.Fit(self)
self.Centre(wx.BOTH)
def run(self, process):
self.process = process
self._timer = wx.Timer(owner=self)
self.Bind(wx.EVT_TIMER, self.OnTimer)
self._timer.Start(100)
self.process.start()
self.gauge.Pulse()
return self.ShowModal()
def OnTimer(self, event):
if hasattr(self.process,'update'):
self.process.update()
self.gauge.Pulse()
def OnAbort(self, event):
self.process.abort()
self._aborted = True
self.EndModal(wx.ID_CANCEL)
def OnError(self, event):
self._error = event.data
self.EndModal(wx.ID_CANCEL)
def exception_raised(self):
return (self._error is not None)
def was_aborted(self):
return (self._aborted)
def handle_error(self):
if isinstance(self._error, Exception):
      raise self._error
elif isinstance(self._error, tuple):
exception, traceback = self._error
if (isinstance(exception, Sorry)):
raise Sorry(str(exception))
raise RuntimeError("""\
Error in subprocess!
Original error: %s
Original traceback:
%s""" % (str(exception), traceback))
else :
raise Sorry("error in child process: %s" % str(self._error))
# finally :
# self.EndModal(wx.ID_CANCEL)
def OnComplete(self, event):
try :
if (self.callback is not None):
self.callback(event.data)
finally :
self._result = event.data
self._timer.Stop()
self.EndModal(wx.ID_OK)
def get_result(self):
return getattr(self, "_result", None)
def run_function_as_process_in_dialog(
parent,
thread_function,
title,
message,
callback=None,
project_id=None,
job_id=None):
dlg = ProcessDialog(
parent=parent,
message=message,
caption=title,
callback=callback)
setup_process_gui_events(
window=dlg,
OnExcept=dlg.OnError,
OnComplete=dlg.OnComplete)
cb = event_agent(dlg, project_id=project_id, job_id=job_id)
p = thread_utils.process_with_callbacks(
target=thread_function,
callback_final=cb.callback_final,
callback_err=cb.callback_error,
buffer_stdout=True,
sleep_after_start=1)
result = None
abort = False
if (dlg.run(p) == wx.ID_OK):
result = dlg.get_result()
elif dlg.exception_raised():
dlg.handle_error()
elif (dlg.was_aborted()):
abort = True
wx.CallAfter(dlg.Destroy)
if (abort):
raise Abort()
return result
# TODO this is awful, needs to be re-thought
def run_function_as_detached_process_in_dialog(
parent,
thread_function,
title,
message,
tmp_dir,
callback=None,
project_id=None,
job_id=None):
if (tmp_dir is None):
tmp_dir = os.getcwd()
params = runtime_utils.process_master_phil.extract()
params.tmp_dir = tmp_dir
if (job_id is None):
job_id = str(os.getpid()) + "_" + str(int(random.random() * 1000))
params.prefix = str(job_id)
target = runtime_utils.detached_process_driver(target=thread_function)
run_file = os.path.join(tmp_dir, "libtbx_run_%s.pkl" % job_id)
easy_pickle.dump(run_file, target)
params.run_file = run_file
eff_file = os.path.join(tmp_dir, "libtbx_run_%s.eff" % job_id)
runtime_utils.write_params(params, eff_file)
dlg = ProcessDialog(
parent=parent,
message=message,
caption=title,
callback=callback)
setup_process_gui_events(
window=dlg,
OnExcept=dlg.OnError,
OnAbort=dlg.OnAbort,
OnComplete=dlg.OnComplete)
agent = event_agent(
window=dlg,
project_id=project_id,
job_id=job_id)
process = detached_process(params, proxy=agent)
cb = event_agent(dlg, project_id=project_id, job_id=job_id)
easy_run.call("libtbx.start_process \"%s\" &" % eff_file)
result = None
abort = False
if (dlg.run(process) == wx.ID_OK):
result = dlg.get_result()
elif dlg.exception_raised():
dlg.handle_error()
elif (dlg.was_aborted()):
abort = True
wx.CallAfter(dlg.Destroy)
if (abort):
raise Abort()
return result
########################################################################
# XXX regression testing utilities
def test_function_1(*args, **kwds):
n = 0
for i in range(25000):
x = math.sqrt(i)
    print(x)
n += x
return n
def test_function_2(*args, **kwds):
n = 0
for i in range(100000):
x = math.sqrt(i)
n += x
return n
def test_function_3(*args, **kwds):
raise RuntimeError("This is a test!")
|
code.py
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality for formatting, linting, etc. code."""
from __future__ import annotations
import os
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from efrotools.filecache import FileCache
if TYPE_CHECKING:
from typing import Set, List, Dict, Any, Union, Optional
def formatcode(projroot: Path, full: bool) -> None:
"""Run clang-format on all of our source code (multithreaded)."""
import time
import concurrent.futures
from multiprocessing import cpu_count
from efrotools import get_files_hash
os.chdir(projroot)
cachepath = Path(projroot, 'config/.cache-formatcode')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
cfconfig = Path(projroot, '.clang-format')
filenames = get_code_filenames(projroot)
confighash = get_files_hash([cfconfig])
cache.update(filenames, confighash)
dirtyfiles = cache.get_dirty_files()
def format_file(filename: str) -> Dict[str, Any]:
start_time = time.time()
# Note: seems os.system does not unlock the gil;
# make sure to use subprocess.
result = subprocess.call(['clang-format', '-i', filename])
if result != 0:
raise Exception(f'Formatting failed for {filename}')
duration = time.time() - start_time
print(f'Formatted {filename} in {duration:.2f} seconds.')
sys.stdout.flush()
return {'f': filename, 't': duration}
with concurrent.futures.ThreadPoolExecutor(
max_workers=cpu_count()) as executor:
# Converting this to a list will propagate any errors.
list(executor.map(format_file, dirtyfiles))
if dirtyfiles:
# Since we changed files, need to update hashes again.
cache.update(filenames, confighash)
cache.mark_clean(filenames)
cache.write()
print(f'Formatting is up to date for {len(filenames)} code files.',
flush=True)
def cpplint(projroot: Path, full: bool) -> None:
"""Run lint-checking on all code deemed lint-able."""
# pylint: disable=too-many-locals
import tempfile
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from efrotools import getconfig
from efro.terminal import Clr
from efro.error import CleanError
os.chdir(projroot)
filenames = get_code_filenames(projroot)
for fpath in filenames:
if ' ' in fpath:
raise Exception(f'Found space in path {fpath}; unexpected.')
# Check the config for a list of ones to ignore.
code_blacklist: List[str] = getconfig(projroot).get(
'cpplint_blacklist', [])
# Just pretend blacklisted ones don't exist.
filenames = [f for f in filenames if f not in code_blacklist]
filenames = [f for f in filenames if not f.endswith('.mm')]
cachepath = Path(projroot, 'config/.cache-lintcode')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
# Clear out entries and hashes for files that have changed/etc.
cache.update(filenames, '')
dirtyfiles = cache.get_dirty_files()
if dirtyfiles:
print(f'{Clr.BLU}CppLint checking'
f' {len(dirtyfiles)} file(s)...{Clr.RST}')
# We want to do a few custom modifications to the cpplint module...
try:
import cpplint as cpplintmodule
except Exception:
raise CleanError('Unable to import cpplint')
with open(cpplintmodule.__file__) as infile:
codelines = infile.read().splitlines()
cheadersline = codelines.index('_C_HEADERS = frozenset([')
# Extra headers we consider as valid C system headers.
c_headers = [
'malloc.h', 'tchar.h', 'jni.h', 'android/log.h', 'EGL/egl.h',
'libgen.h', 'linux/netlink.h', 'linux/rtnetlink.h', 'android/bitmap.h',
'android/log.h', 'uuid/uuid.h', 'cxxabi.h', 'direct.h', 'shellapi.h',
'rpc.h', 'io.h'
]
codelines.insert(cheadersline + 1, ''.join(f"'{h}'," for h in c_headers))
# Skip unapproved C++ headers check (it flags <mutex>, <thread>, etc.)
headercheckline = codelines.index(
" if include and include.group(1) in ('cfenv',")
codelines[headercheckline] = (
" if False and include and include.group(1) in ('cfenv',")
# Don't complain about unknown NOLINT categories.
# (we use them for clang-tidy)
unknownlintline = codelines.index(
' elif category not in _LEGACY_ERROR_CATEGORIES:')
codelines[unknownlintline] = ' elif False:'
def lint_file(filename: str) -> None:
result = subprocess.call(['cpplint', '--root=src', filename], env=env)
if result != 0:
raise CleanError(
f'{Clr.RED}Cpplint failed for {filename}.{Clr.RST}')
with tempfile.TemporaryDirectory() as tmpdir:
# Write our replacement module, make it discoverable, then run.
with open(tmpdir + '/cpplint.py', 'w') as outfile:
outfile.write('\n'.join(codelines))
env = os.environ.copy()
env['PYTHONPATH'] = tmpdir
with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
# Converting this to a list will propagate any errors.
list(executor.map(lint_file, dirtyfiles))
if dirtyfiles:
cache.mark_clean(filenames)
cache.write()
print(
f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
flush=True)
def get_code_filenames(projroot: Path) -> List[str]:
"""Return the list of files to lint-check or auto-formatting."""
from efrotools import getconfig
exts = ('.h', '.c', '.cc', '.cpp', '.cxx', '.m', '.mm')
places = getconfig(projroot).get('code_source_dirs', None)
if places is None:
raise RuntimeError('code_source_dirs not declared in config')
codefilenames = []
for place in places:
for root, _dirs, files in os.walk(place):
for fname in files:
if any(fname.endswith(ext) for ext in exts):
codefilenames.append(os.path.join(root, fname))
codefilenames.sort()
return codefilenames
def formatscripts(projroot: Path, full: bool) -> None:
"""Runs yapf on all our scripts (multithreaded)."""
import time
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from efrotools import get_files_hash
os.chdir(projroot)
cachepath = Path(projroot, 'config/.cache-formatscripts')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
yapfconfig = Path(projroot, '.style.yapf')
filenames = get_script_filenames(projroot)
confighash = get_files_hash([yapfconfig])
cache.update(filenames, confighash)
dirtyfiles = cache.get_dirty_files()
def format_file(filename: str) -> None:
start_time = time.time()
result = subprocess.call(['yapf', '--in-place', filename])
if result != 0:
raise Exception(f'Formatting failed for {filename}')
duration = time.time() - start_time
print(f'Formatted {filename} in {duration:.2f} seconds.')
sys.stdout.flush()
with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
# Convert the futures to a list to propagate any errors even
# though there are no return values we use.
list(executor.map(format_file, dirtyfiles))
if dirtyfiles:
# Since we changed files, need to update hashes again.
cache.update(filenames, confighash)
cache.mark_clean(filenames)
cache.write()
print(f'Formatting is up to date for {len(filenames)} script files.',
flush=True)
def _should_include_script(fnamefull: str) -> bool:
fname = os.path.basename(fnamefull)
if fname.endswith('.py'):
return True
# Look for 'binary' scripts with no extensions too.
if not fname.startswith('.') and '.' not in fname:
try:
with open(fnamefull) as infile:
line = infile.readline()
if '/usr/bin/env python' in line or '/usr/bin/python' in line:
return True
except UnicodeDecodeError:
# Actual binary files will probably kick back this error.
pass
return False
def get_script_filenames(projroot: Path) -> List[str]:
"""Return the Python filenames to lint-check or auto-format."""
from efrotools import getconfig
filenames = set()
places = getconfig(projroot).get('python_source_dirs', None)
if places is None:
raise RuntimeError('python_source_dirs not declared in config')
for place in places:
for root, _dirs, files in os.walk(place):
for fname in files:
fnamefull = os.path.join(root, fname)
# Skip symlinks (we conceivably operate on the original too)
if os.path.islink(fnamefull):
continue
if _should_include_script(fnamefull):
filenames.add(fnamefull)
return sorted(list(f for f in filenames if 'flycheck_' not in f))
def runpylint(projroot: Path, filenames: List[str]) -> None:
"""Run Pylint explicitly on files."""
pylintrc = Path(projroot, '.pylintrc')
if not os.path.isfile(pylintrc):
raise Exception('pylintrc not found where expected')
# Technically we could just run pylint standalone via command line here,
# but let's go ahead and run it inline so we're consistent with our cached
# full-project version.
_run_pylint(projroot,
pylintrc,
cache=None,
dirtyfiles=filenames,
allfiles=None)
def pylint(projroot: Path, full: bool, fast: bool) -> None:
"""Run Pylint on all scripts in our project (with smart dep tracking)."""
from efrotools import get_files_hash
from efro.terminal import Clr
pylintrc = Path(projroot, '.pylintrc')
if not os.path.isfile(pylintrc):
raise Exception('pylintrc not found where expected')
filenames = get_script_filenames(projroot)
if any(' ' in name for name in filenames):
raise Exception('found space in path; unexpected')
script_blacklist: List[str] = []
filenames = [f for f in filenames if f not in script_blacklist]
cachebasename = '.cache-lintscriptsfast' if fast else '.cache-lintscripts'
cachepath = Path(projroot, 'config', cachebasename)
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
# Clear out entries and hashes for files that have changed/etc.
cache.update(filenames, get_files_hash([pylintrc]))
# Do a recursive dependency check and mark all files who are
# either dirty or have a dependency that is dirty.
filestates: Dict[str, bool] = {}
for fname in filenames:
_dirty_dep_check(fname, filestates, cache, fast, 0)
dirtyfiles = [k for k, v in filestates.items() if v]
# Let's sort by modification time, so ones we're actively trying
# to fix get linted first and we see remaining errors faster.
dirtyfiles.sort(reverse=True, key=lambda f: os.stat(f).st_mtime)
if dirtyfiles:
print(
f'{Clr.BLU}Pylint checking {len(dirtyfiles)} file(s)...{Clr.RST}',
flush=True)
try:
_run_pylint(projroot, pylintrc, cache, dirtyfiles, filenames)
finally:
# No matter what happens, we still want to
# update our disk cache (since some lints may have passed).
cache.write()
print(f'{Clr.GRN}Pylint: all {len(filenames)} files are passing.{Clr.RST}',
flush=True)
cache.write()
def _dirty_dep_check(fname: str, filestates: Dict[str, bool], cache: FileCache,
fast: bool, recursion: int) -> bool:
"""Recursively check a file's deps and return whether it is dirty."""
# pylint: disable=too-many-branches
if not fast:
# Check for existing dirty state (only applies in non-fast where
# we recurse infinitely).
curstate = filestates.get(fname)
if curstate is not None:
return curstate
# Ok; there's no current state for this file.
# First lets immediately mark it as clean so if a dependency of ours
# queries it we won't loop infinitely. (If we're actually dirty that
# will be reflected properly once we're done).
if not fast:
filestates[fname] = False
# If this dependency has disappeared, consider that dirty.
if fname not in cache.entries:
dirty = True
else:
cacheentry = cache.entries[fname]
# See if we ourself are dirty
if 'hash' not in cacheentry:
dirty = True
else:
# Ok we're clean; now check our dependencies..
dirty = False
# Only increment recursion in fast mode, and
            # skip dependencies if we're past the recursion limit.
recursion2 = recursion
if fast:
# Our one exception is top level ba which basically aggregates.
if not fname.endswith('/ba/__init__.py'):
recursion2 += 1
if recursion2 <= 1:
deps = cacheentry.get('deps', [])
for dep in deps:
# If we have a dep that no longer exists, WE are dirty.
if not os.path.exists(dep):
dirty = True
break
if _dirty_dep_check(dep, filestates, cache, fast,
recursion2):
dirty = True
break
# Cache and return our dirty state..
# Note: for fast mode we limit to recursion==0 so we only write when
# the file itself is being directly visited.
if recursion == 0:
filestates[fname] = dirty
return dirty
def _run_pylint(projroot: Path, pylintrc: Union[Path, str],
cache: Optional[FileCache], dirtyfiles: List[str],
allfiles: Optional[List[str]]) -> Dict[str, Any]:
import time
from pylint import lint
from efro.error import CleanError
from efro.terminal import Clr
start_time = time.time()
args = ['--rcfile', str(pylintrc), '--output-format=colorized']
args += dirtyfiles
name = f'{len(dirtyfiles)} file(s)'
run = lint.Run(args, do_exit=False)
if cache is not None:
assert allfiles is not None
result = _apply_pylint_run_to_cache(projroot, run, dirtyfiles,
allfiles, cache)
if result != 0:
raise CleanError(f'Pylint failed for {result} file(s).')
# Sanity check: when the linter fails we should always be failing too.
# If not, it means we're probably missing something and incorrectly
# marking a failed file as clean.
if run.linter.msg_status != 0 and result == 0:
raise RuntimeError('Pylint linter returned non-zero result'
' but we did not; this is probably a bug.')
else:
if run.linter.msg_status != 0:
raise CleanError('Pylint failed.')
duration = time.time() - start_time
print(f'{Clr.GRN}Pylint passed for {name}'
f' in {duration:.1f} seconds.{Clr.RST}')
sys.stdout.flush()
return {'f': dirtyfiles, 't': duration}
def _apply_pylint_run_to_cache(projroot: Path, run: Any, dirtyfiles: List[str],
allfiles: List[str], cache: FileCache) -> int:
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
from astroid import modutils
from efrotools import getconfig
from efro.error import CleanError
# First off, build a map of dirtyfiles to module names
# (and the corresponding reverse map).
paths_to_names: Dict[str, str] = {}
names_to_paths: Dict[str, str] = {}
for fname in allfiles:
try:
mpath = modutils.modpath_from_file(fname)
mpath = _filter_module_name('.'.join(mpath))
paths_to_names[fname] = mpath
except ImportError:
            # This probably means it's a tool or something not in our
# standard path. In this case just use its base name.
# (seems to be what pylint does)
dummyname = os.path.splitext(os.path.basename(fname))[0]
paths_to_names[fname] = dummyname
for key, val in paths_to_names.items():
names_to_paths[val] = key
    # If there are any cyclic-import errors, just mark all deps as dirty;
    # don't want to add the logic to figure out which ones the cycles cover
    # since they all seem to appear as errors for the last file in the list.
cycles: int = run.linter.stats.get('by_msg', {}).get('cyclic-import', 0)
have_dep_cycles: bool = cycles > 0
if have_dep_cycles:
print(f'Found {cycles} cycle-errors; keeping all dirty files dirty.')
# Update dependencies for what we just ran.
# A run leaves us with a map of modules to a list of the modules that
# imports them. We want the opposite though: for each of our modules
# we want a list of the modules it imports.
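    # For example (hypothetical module names, for illustration only): a stats
    # entry of {'efro.terminal': ['ba.app', 'ba.ui']} (efro.terminal is
    # imported by ba.app and ba.ui) is inverted below into
    # deps == {'ba.app': {'efro.terminal'}, 'ba.ui': {'efro.terminal'}}.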
reversedeps = {}
# Make sure these are all proper module names; no foo.bar.__init__ stuff.
for key, val in run.linter.stats['dependencies'].items():
sval = [_filter_module_name(m) for m in val]
reversedeps[_filter_module_name(key)] = sval
deps: Dict[str, Set[str]] = {}
untracked_deps = set()
for mname, mallimportedby in reversedeps.items():
for mimportedby in mallimportedby:
if mname in names_to_paths:
deps.setdefault(mimportedby, set()).add(mname)
else:
untracked_deps.add(mname)
ignored_untracked_deps: List[str] = getconfig(projroot).get(
'pylint_ignored_untracked_deps', [])
# Add a few that this package itself triggers.
ignored_untracked_deps += ['pylint.lint', 'astroid.modutils', 'astroid']
# Ignore some specific untracked deps; complain about any others.
untracked_deps = set(dep for dep in untracked_deps
if dep not in ignored_untracked_deps)
if untracked_deps:
raise CleanError(
f'Pylint found untracked dependencies: {untracked_deps}.'
' If these are external to your project, add them to'
' "pylint_ignored_untracked_deps" in the project config.')
# Finally add the dependency lists to our entries (operate on
# everything in the run; it may not be mentioned in deps).
no_deps_modules = set()
for fname in dirtyfiles:
fmod = paths_to_names[fname]
if fmod not in deps:
            # Since this code is a bit flaky, let's always announce when we
# come up empty and keep a whitelist of expected values to ignore.
no_deps_modules.add(fmod)
depsval: List[str] = []
else:
# Our deps here are module names; store paths.
depsval = [names_to_paths[dep] for dep in deps[fmod]]
cache.entries[fname]['deps'] = depsval
# Let's print a list of modules with no detected deps so we can make sure
# this is behaving.
if no_deps_modules:
if bool(False):
print('NOTE: no dependencies found for:',
', '.join(no_deps_modules))
# Ok, now go through all dirtyfiles involved in this run.
# Mark them as either errored or clean depending on whether there's
# error info for them in the run stats.
# Once again need to convert any foo.bar.__init__ to foo.bar.
stats_by_module: Dict[str, Any] = {
_filter_module_name(key): val
for key, val in run.linter.stats['by_module'].items()
}
errcount = 0
for fname in dirtyfiles:
mname2 = paths_to_names.get(fname)
if mname2 is None:
raise Exception('unable to get module name for "' + fname + '"')
counts = stats_by_module.get(mname2)
# 'statement' count seems to be new and always non-zero; ignore it
if counts is not None:
counts = {c: v for c, v in counts.items() if c != 'statement'}
if (counts is not None and any(counts.values())) or have_dep_cycles:
# print('GOT FAIL FOR', fname, counts)
if 'hash' in cache.entries[fname]:
del cache.entries[fname]['hash']
errcount += 1
else:
# print('MARKING FILE CLEAN', mname2, fname)
cache.entries[fname]['hash'] = (cache.curhashes[fname])
return errcount
def _filter_module_name(mpath: str) -> str:
"""Filter weird module paths such as 'foo.bar.__init__' to 'foo.bar'."""
# Seems Pylint returns module paths with __init__ on the end in some cases
# and not in others. Could dig into it, but for now just filtering them
# out...
return mpath[:-9] if mpath.endswith('.__init__') else mpath
def runmypy(projroot: Path,
filenames: List[str],
full: bool = False,
check: bool = True) -> None:
"""Run MyPy on provided filenames."""
from efrotools import PYTHON_BIN
args = [
PYTHON_BIN, '-m', 'mypy', '--pretty', '--no-error-summary',
'--config-file',
str(Path(projroot, '.mypy.ini'))
] + filenames
if full:
args.insert(args.index('mypy') + 1, '--no-incremental')
subprocess.run(args, check=check)
def mypy(projroot: Path, full: bool) -> None:
"""Type check all of our scripts using mypy."""
import time
from efro.terminal import Clr
from efro.error import CleanError
filenames = get_script_filenames(projroot)
desc = '(full)' if full else '(incremental)'
print(f'{Clr.BLU}Running Mypy {desc}...{Clr.RST}', flush=True)
starttime = time.time()
try:
runmypy(projroot, filenames, full)
except Exception:
raise CleanError('Mypy failed.')
duration = time.time() - starttime
print(f'{Clr.GRN}Mypy passed in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def dmypy(projroot: Path) -> None:
"""Type check all of our scripts using mypy in daemon mode."""
import time
from efro.terminal import Clr
from efro.error import CleanError
filenames = get_script_filenames(projroot)
# Special case; explicitly kill the daemon.
if '-stop' in sys.argv:
subprocess.run(['dmypy', 'stop'], check=False)
return
print('Running Mypy (daemon)...', flush=True)
starttime = time.time()
try:
args = [
'dmypy', 'run', '--timeout', '3600', '--', '--config-file',
'.mypy.ini', '--follow-imports=error', '--pretty'
] + filenames
subprocess.run(args, check=True)
except Exception:
raise CleanError('Mypy daemon: fail.')
duration = time.time() - starttime
print(f'{Clr.GRN}Mypy daemon passed in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def _parse_idea_results(path: Path) -> int:
"""Print errors found in an idea inspection xml file.
Returns the number of errors found.
"""
import xml.etree.ElementTree as Et
error_count = 0
root = Et.parse(str(path)).getroot()
for child in root:
line: Optional[str] = None
description: Optional[str] = None
fname: Optional[str] = None
if child.tag == 'problem':
is_error = True
for pchild in child:
if pchild.tag == 'problem_class':
# We still report typos but we don't fail the
# check due to them (that just gets tedious).
if pchild.text == 'Typo':
is_error = False
if pchild.tag == 'line':
line = pchild.text
if pchild.tag == 'description':
description = pchild.text
if pchild.tag == 'file':
fname = pchild.text
if isinstance(fname, str):
fname = fname.replace('file://$PROJECT_DIR$/', '')
print(f'{fname}:{line}: {description}')
if is_error:
error_count += 1
return error_count
def _run_idea_inspections(projroot: Path,
scripts: List[str],
displayname: str,
inspect: Path,
verbose: bool,
inspectdir: Path = None) -> None:
"""Actually run idea inspections.
Throw an Exception if anything is found or goes wrong.
"""
# pylint: disable=too-many-locals
import tempfile
import time
import datetime
from efro.error import CleanError
from efro.terminal import Clr
start_time = time.time()
print(
f'{Clr.BLU}{displayname} checking'
f' {len(scripts)} file(s)...{Clr.RST}',
flush=True)
tmpdir = tempfile.TemporaryDirectory()
iprof = Path(projroot, '.idea/inspectionProfiles/Default.xml')
if not iprof.exists():
iprof = Path(projroot, '.idea/inspectionProfiles/Project_Default.xml')
if not iprof.exists():
raise Exception('No default inspection profile found.')
cmd = [str(inspect), str(projroot), str(iprof), tmpdir.name, '-v2']
if inspectdir is not None:
cmd += ['-d', str(inspectdir)]
running = True
def heartbeat() -> None:
"""Print the time occasionally to make the log more informative."""
while running:
time.sleep(60)
print('Heartbeat', datetime.datetime.now(), flush=True)
if verbose:
import threading
print(cmd, flush=True)
threading.Thread(target=heartbeat, daemon=True).start()
result = subprocess.run(cmd, capture_output=not verbose, check=False)
running = False
if result.returncode != 0:
# In verbose mode this stuff got printed already.
if not verbose:
stdout = (
result.stdout.decode() if isinstance( # type: ignore
result.stdout, bytes) else str(result.stdout))
stderr = (
result.stderr.decode() if isinstance( # type: ignore
                    result.stderr, bytes) else str(result.stderr))
print(f'{displayname} inspection failure stdout:\n{stdout}' +
f'{displayname} inspection failure stderr:\n{stderr}')
raise RuntimeError(f'{displayname} inspection failed.')
files = [f for f in os.listdir(tmpdir.name) if not f.startswith('.')]
total_errors = 0
if files:
for fname in files:
total_errors += _parse_idea_results(Path(tmpdir.name, fname))
if total_errors > 0:
raise CleanError(f'{Clr.SRED}{displayname} inspection'
f' found {total_errors} error(s).{Clr.RST}')
duration = time.time() - start_time
print(
f'{Clr.GRN}{displayname} passed for {len(scripts)} files'
f' in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def _run_idea_inspections_cached(cachepath: Path,
filenames: List[str],
full: bool,
projroot: Path,
displayname: str,
inspect: Path,
verbose: bool,
inspectdir: Path = None) -> None:
# pylint: disable=too-many-locals
import hashlib
import json
from efro.terminal import Clr
md5 = hashlib.md5()
# Let's calc a single hash from the contents of all script files and only
# run checks when that changes. Sadly there's not much else optimization
# wise that we can easily do, but this will at least prevent re-checks when
# nothing at all has changed.
for filename in filenames:
with open(filename, 'rb') as infile:
md5.update(infile.read())
# Also hash a few .idea files so we re-run inspections when they change.
extra_hash_paths = [
Path(projroot, '.idea/inspectionProfiles/Default.xml'),
Path(projroot, '.idea/inspectionProfiles/Project_Default.xml'),
Path(projroot, '.idea/dictionaries/ericf.xml')
]
for epath in extra_hash_paths:
if os.path.exists(epath):
with open(epath, 'rb') as infile:
md5.update(infile.read())
current_hash = md5.hexdigest()
existing_hash: Optional[str]
try:
with open(cachepath) as infile2:
existing_hash = json.loads(infile2.read())['hash']
except Exception:
existing_hash = None
if full or current_hash != existing_hash:
_run_idea_inspections(projroot,
filenames,
displayname,
inspect=inspect,
verbose=verbose,
inspectdir=inspectdir)
with open(cachepath, 'w') as outfile:
outfile.write(json.dumps({'hash': current_hash}))
print(
f'{Clr.GRN}{displayname}: all {len(filenames)}'
f' files are passing.{Clr.RST}',
flush=True)
def pycharm(projroot: Path, full: bool, verbose: bool) -> None:
"""Run pycharm inspections on all our scripts."""
import time
# FIXME: Generalize this to work with at least linux, possibly windows.
cachepath = Path('config/.cache-pycharm')
filenames = get_script_filenames(projroot)
pycharmroot = Path('/Applications/PyCharm CE.app')
pycharmbin = Path(pycharmroot, 'Contents/MacOS/pycharm')
inspect = Path(pycharmroot, 'Contents/bin/inspect.sh')
# In full mode, clear out pycharm's caches first.
# It seems we need to spin up the GUI and give it a bit to
# re-cache system python for this to work...
# UPDATE: This really slows things down, so we now only do it in
# very specific cases where time isn't important.
# (such as our daily full-test-runs)
if full and os.environ.get('EFROTOOLS_FULL_PYCHARM_RECACHE') == '1':
print('Clearing PyCharm caches...', flush=True)
subprocess.run('rm -rf ~/Library/Caches/PyCharmCE*',
shell=True,
check=True)
print('Launching GUI PyCharm to rebuild caches...', flush=True)
process = subprocess.Popen(str(pycharmbin))
# Wait a bit and ask it nicely to die.
# We need to make sure it has enough time to do its cache updating
# thing even if the system is fully under load.
time.sleep(10 * 60)
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
subprocess.run(
"osascript -e 'tell application \"PyCharm CE\" to quit'",
shell=True,
check=False)
# process.terminate()
print('Waiting for GUI PyCharm to quit...', flush=True)
process.wait()
_run_idea_inspections_cached(cachepath=cachepath,
filenames=filenames,
full=full,
projroot=projroot,
displayname='PyCharm',
inspect=inspect,
verbose=verbose)
def clioncode(projroot: Path, full: bool, verbose: bool) -> None:
"""Run clion inspections on all our code."""
import time
cachepath = Path('config/.cache-clioncode')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/CLion.app')
clionbin = Path(clionroot, 'Contents/MacOS/clion')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
print('Clearing CLion caches...', flush=True)
subprocess.run('rm -rf ~/Library/Caches/CLion*', shell=True, check=True)
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
print('Launching GUI CLion to rebuild caches...', flush=True)
process = subprocess.Popen(str(clionbin))
# Wait a moment and ask it nicely to die.
waittime = 120
while waittime > 0:
print(f'Waiting for {waittime} more seconds.')
time.sleep(10)
waittime -= 10
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
subprocess.run("osascript -e 'tell application \"CLion\" to quit'",
shell=True,
check=False)
# process.terminate()
print('Waiting for GUI CLion to quit...', flush=True)
process.wait(timeout=60)
print('Launching Offline CLion to run inspections...', flush=True)
_run_idea_inspections_cached(
cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot, 'ballisticacore-cmake'),
inspectdir=Path(projroot, 'ballisticacore-cmake/src/ballistica'),
displayname='CLion',
inspect=inspect,
verbose=verbose)
def androidstudiocode(projroot: Path, full: bool, verbose: bool) -> None:
"""Run Android Studio inspections on all our code."""
# import time
cachepath = Path('config/.cache-androidstudiocode')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/Android Studio.app')
# clionbin = Path(clionroot, 'Contents/MacOS/studio')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
# print('Clearing Android Studio caches...', flush=True)
# subprocess.run('rm -rf ~/Library/Caches/AndroidStudio*',
# shell=True,
# check=True)
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
# print('Launching GUI CLion to rebuild caches...', flush=True)
# process = subprocess.Popen(str(clionbin))
# Wait a moment and ask it nicely to die.
# time.sleep(120)
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
# subprocess.run(
# "osascript -e 'tell application \"Android Studio\" to quit'",
# shell=True)
# process.terminate()
# print('Waiting for GUI CLion to quit...', flush=True)
# process.wait(timeout=60)
print('Launching Offline Android Studio to run inspections...', flush=True)
_run_idea_inspections_cached(
cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot, 'ballisticacore-android'),
inspectdir=Path(
projroot,
'ballisticacore-android/BallisticaCore/src/main/cpp/src/ballistica'
),
# inspectdir=None,
displayname='Android Studio',
inspect=inspect,
verbose=verbose)
|
bot.py
|
import telebot
from telebot import types
import threading
from aitextgen import aitextgen
from functions import *
from config import TOKEN
import psutil
import os
pid = os.getpid()
py = psutil.Process(pid)
queue = []
ai = aitextgen(model_folder="model")
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start'])
def start(message):
id = message.chat.id
markup = types.InlineKeyboardMarkup()
markup.row_width = 2
markup.add(types.InlineKeyboardButton("Поддержать", callback_data="support"))
bot.send_message(id, "Привет! \nЯ бот который с помощью нейросети генерирует анекдоты. Пиши \"Давай анек\". \n Поддержать бота: https://donationalerts.com/r/kurays", reply_markup=markup)
@bot.message_handler(commands=['go'])
def go(message):
id = message.chat.id
if queue.count(id) > 0:
bot.send_message(id, "Спокойно спокойно жди когда догенерю!")
return
bot.send_message(id, "Генерю нейроанекдот")
to_queue(queue, id)
if check_queue(queue, id): bot.send_message(id, "Ваша позиция в очереди " + get_index(queue, id))
threading.Thread(target = start_waiting, args = [bot, queue, ai, id]).start()
@bot.message_handler(commands=['queue'])
def check(message):
id = message.chat.id
if queue.count(id) == 0:
bot.send_message(id, "Вас нет в очереди")
return
bot.send_message(id, "Ваша позиция в очереди " + get_index(queue, id))
@bot.message_handler(commands=['stat'])
def stats(message):
id = message.chat.id
bot.send_message(id,
"RAM USAGE BY BOT: {} GB".format(round(py.memory_info()[0]/2.**30),2) + "\n" +
"RAM: {0}/{1} GB".format(round(psutil.virtual_memory().used/1024/1024/1024),round(psutil.virtual_memory().total/1024/1024/1024)) + "\n" +
"CPU LOAD: {}%".format(psutil.cpu_percent(interval=0.5)) + "\n" +
"QUEUE: {}".format(len(queue))
)
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
if call.data == "cb_yes":
bot.answer_callback_query(call.id, "Пока это не записываем к сожалению")
elif call.data == "cb_no":
bot.answer_callback_query(call.id, "Пока это не записываем к сожалению")
elif call.data == "cb_next":
id = call.message.chat.id
if queue.count(id) > 0:
bot.send_message(id, "Спокойно спокойно жди когда догенерю!")
return
bot.send_message(id, "Идем делать следующий :)")
to_queue(queue, id)
if check_queue(queue, id): bot.send_message(id, "Ваша позиция в очереди " + get_index(queue, id))
threading.Thread(target = start_waiting, args = [bot, queue, ai, id]).start()
bot.polling(none_stop=True, interval=0)
|
data_reader.py
|
import threading
import Queue
import operator
import os
import sys
class MapReduce:
''' MapReduce - to use, subclass by defining these functions,
then call self.map_reduce():
parse_fn(self, k, v) => [(k, v), ...]
map_fn(self, k, v) => [(k, v1), (k, v2), ...]
reduce_fn(self, k, [v1, v2, ...]) => [(k, v)]
output_fn(self, [(k, v), ...])
'''
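    # Illustrative data flow for a hypothetical word-count subclass (values
    # made up for this comment, not produced by this module):
    #   parse_fn  -> [(None, 'a b a')]
    #   map_fn    -> [('a', 1), ('b', 1), ('a', 1)]
    #   merge     -> [('a', [1, 1]), ('b', [1])]
    #   reduce_fn -> [('a', 2), ('b', 1)]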
def __init__(self):
self.data = None
self.num_worker_threads = 5
class SynchronizedDict(): # we need this for merging
def __init__(self):
self.lock = threading.Lock()
self.d = {}
def isin(self, k):
with self.lock:
if k in self.d:
return True
else:
return False
def get(self, k):
with self.lock:
return self.d[k]
def set(self, k, v): # we don't need del
with self.lock:
self.d[k] = v
def set_append(self, k, v): # for thread-safe list append
with self.lock:
self.d[k].append(v)
def items(self):
with self.lock:
return self.d.items()
def create_queue(self, input_list): # helper fn for queues
output_queue = Queue.Queue()
for value in input_list:
output_queue.put(value)
return output_queue
def create_list(self, input_queue): # helper fn for queues
output_list = []
while not input_queue.empty():
item = input_queue.get()
output_list.append(item)
input_queue.task_done()
return output_list
def merge_fn(self, k, v, merge_dict): # helper fn for merge
if merge_dict.isin(k):
merge_dict.set_append(k, v)
else:
merge_dict.set(k, [v])
def process_queue(self, input_queue, fn_selector): # helper fn
output_queue = Queue.Queue()
if fn_selector == 'merge':
merge_dict = self.SynchronizedDict()
def worker():
while not input_queue.empty():
(k, v) = input_queue.get()
if fn_selector in ['map', 'reduce']:
if fn_selector == 'map':
result_list = self.map_fn(k, v)
elif fn_selector == 'reduce':
result_list = self.reduce_fn(k, v)
for result_tuple in result_list: # flatten
output_queue.put(result_tuple)
elif fn_selector == 'merge': # merge v to same k
self.merge_fn(k, v, merge_dict)
else:
                    raise Exception("Bad fn_selector=" + fn_selector)
input_queue.task_done()
for i in range(self.num_worker_threads): # start threads
worker_thread = threading.Thread(target=worker)
worker_thread.daemon = True
worker_thread.start()
input_queue.join() # wait for worker threads to finish
if fn_selector == 'merge':
output_list = sorted(merge_dict.items(), key=operator.itemgetter(0))
output_queue = self.create_queue(output_list)
return output_queue
    def map_reduce(self): # the actual map-reduce algorithm
data_list = self.parse_fn(self.data)
data_queue = self.create_queue(data_list) # enqueue the data so we can multi-process
map_queue = self.process_queue(data_queue, 'map') # [(k,v),...] => [(k,v1),(k,v2),...]
# do not need merge and reduce
# merge_queue = self.process_queue(map_queue, 'merge') # [(k,v1),(k,v2),...] => [(k,[v1,v2,...]),...]
# reduce_queue = self.process_queue(merge_queue, 'reduce') # [(k,[v1,v2,...]),...] => [(k,v),...]
output_list = self.create_list(map_queue) # deque into list for output handling
return self.output_fn(output_list)
class DataReader(MapReduce):
"""DataReader - simple read data content"""
def __init__(self, filenames):
MapReduce.__init__(self)
self.data = filenames
def parse_fn(self, data): # break string into [(k, v), ...] tuples for each line
data_list = map(lambda filename: (None, filename), data)
return data_list
def map_fn(self, key, filename): # return (filename, content) tuples for each file, ignore key
os.write(1, '\rmap file: %s' %(filename))
sys.stdout.flush()
with open(filename) as f:
data = f.readlines()
return [(filename, data)]
def reduce_fn(self, filename, data): # just return data
return [(filename, data)]
def output_fn(self, output_list): # just return
return output_list
def create_test_file(data_dir):
if not os.path.exists(data_dir):
os.mkdir(data_dir)
for i in range(0, 20):
filename = 'file%d.txt' %(i)
fp = os.path.join(data_dir, filename)
with open(fp, 'w') as f:
f.write('file %d' %(i))
def main():
data_dir = 'tmp/'
create_test_file(data_dir)
filenames = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith('.txt')]
print(filenames)
dr = DataReader(filenames)
content = dr.map_reduce()
print
print(content)
print(len(content))
if __name__ == '__main__':
main()
|
engine.py
|
# -*- coding: utf-8 -*-
"""The multi-process processing engine."""
from __future__ import unicode_literals
import abc
import ctypes
import logging
import os
import signal
import sys
import threading
import time
from plaso.engine import engine
from plaso.engine import process_info
from plaso.lib import definitions
from plaso.multi_processing import plaso_xmlrpc
class MultiProcessEngine(engine.BaseEngine):
"""Multi-process engine base.
This class contains functionality to:
* monitor and manage worker processes;
* retrieve a process status information via RPC;
* manage the status update thread.
"""
# Note that on average Windows seems to require a longer wait.
_RPC_SERVER_TIMEOUT = 8.0
_MAXIMUM_RPC_ERRORS = 10
# Maximum number of attempts to try to start a replacement worker process.
_MAXIMUM_REPLACEMENT_RETRIES = 3
# Number of seconds to wait between attempts to start a replacement worker
# process
_REPLACEMENT_WORKER_RETRY_DELAY = 1
_PROCESS_JOIN_TIMEOUT = 5.0
_ZEROMQ_NO_WORKER_REQUEST_TIME_SECONDS = 300
def __init__(self):
"""Initializes a multi-process engine."""
super(MultiProcessEngine, self).__init__()
self._debug_output = False
self._name = 'Main'
self._log_filename = None
self._pid = os.getpid()
self._process_information = process_info.ProcessInfo(self._pid)
self._process_information_per_pid = {}
self._processes_per_pid = {}
self._quiet_mode = False
self._rpc_clients_per_pid = {}
self._rpc_errors_per_pid = {}
self._status_update_active = False
self._status_update_callback = None
self._status_update_thread = None
self._storage_writer = None
self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT
def _AbortJoin(self, timeout=None):
"""Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
"""
for pid, process in iter(self._processes_per_pid.items()):
logging.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.join(timeout=timeout)
if not process.is_alive():
logging.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
process.name, pid))
def _AbortKill(self):
"""Aborts all registered processes by sending a SIGKILL or equivalent."""
for pid, process in iter(self._processes_per_pid.items()):
if not process.is_alive():
continue
logging.warning('Killing process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
self._KillProcess(pid)
def _AbortTerminate(self):
"""Aborts all registered processes by sending a SIGTERM or equivalent."""
for pid, process in iter(self._processes_per_pid.items()):
if not process.is_alive():
continue
logging.warning('Terminating process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.terminate()
def _CheckStatusWorkerProcess(self, pid):
"""Checks the status of a worker process.
If a worker process is not responding the process is terminated and
a replacement process is started.
Args:
pid (int): process ID (PID) of a registered worker process.
Raises:
KeyError: if the process is not registered with the engine.
"""
# TODO: Refactor this method, simplify and separate concerns (monitoring
# vs management).
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
process_status = self._QueryProcessStatus(process)
if process_status is None:
process_is_alive = False
else:
process_is_alive = True
process_information = self._process_information_per_pid[pid]
used_memory = process_information.GetUsedMemory() or 0
if self._worker_memory_limit and used_memory > self._worker_memory_limit:
logging.warning((
'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
'memory limit: {2:d}.').format(
process.name, pid, self._worker_memory_limit))
self._KillProcess(pid)
if isinstance(process_status, dict):
self._rpc_errors_per_pid[pid] = 0
status_indicator = process_status.get('processing_status', None)
else:
rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
self._rpc_errors_per_pid[pid] = rpc_errors
if rpc_errors > self._MAXIMUM_RPC_ERRORS:
process_is_alive = False
if process_is_alive:
rpc_port = process.rpc_port.value
logging.warning((
'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
'RPC socket: http://localhost:{2:d}').format(
process.name, pid, rpc_port))
processing_status_string = 'RPC error'
status_indicator = definitions.PROCESSING_STATUS_RUNNING
else:
processing_status_string = 'killed'
status_indicator = definitions.PROCESSING_STATUS_KILLED
process_status = {
'processing_status': processing_status_string}
self._UpdateProcessingStatus(pid, process_status, used_memory)
# _UpdateProcessingStatus can also change the status of the worker,
# So refresh the status if applicable.
for worker_status in self._processing_status.workers_status:
if worker_status.pid == pid:
status_indicator = worker_status.status
break
if status_indicator in definitions.PROCESSING_ERROR_STATUS:
logging.error((
'Process {0:s} (PID: {1:d}) is not functioning correctly. '
'Status code: {2!s}.').format(process.name, pid, status_indicator))
self._TerminateProcessByPid(pid)
logging.info('Starting replacement worker process for {0:s}'.format(
process.name))
replacement_process_attempts = 0
replacement_process = None
while replacement_process_attempts < self._MAXIMUM_REPLACEMENT_RETRIES:
replacement_process_attempts += 1
replacement_process = self._StartWorkerProcess(
process.name, self._storage_writer)
if not replacement_process:
time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)
        continue
      break
if not replacement_process:
logging.error(
'Unable to create replacement worker process for: {0:s}'.format(
process.name))
def _KillProcess(self, pid):
"""Issues a SIGKILL or equivalent to the process.
Args:
pid (int): process identifier (PID).
"""
if sys.platform.startswith('win'):
process_terminate = 1
handle = ctypes.windll.kernel32.OpenProcess(
process_terminate, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
except OSError as exception:
logging.error('Unable to kill process {0:d} with error: {1!s}'.format(
pid, exception))
def _QueryProcessStatus(self, process):
"""Queries a process to determine its status.
Args:
process (MultiProcessBaseProcess): process to query for its status.
Returns:
dict[str, str]: status values received from the worker process.
"""
process_is_alive = process.is_alive()
if process_is_alive:
rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
process_status = rpc_client.CallFunction()
else:
process_status = None
return process_status
def _RaiseIfNotMonitored(self, pid):
"""Raises if the process is not monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not monitored by the engine.
"""
if pid not in self._process_information_per_pid:
raise KeyError(
'Process (PID: {0:d}) not monitored by engine.'.format(pid))
def _RaiseIfNotRegistered(self, pid):
"""Raises if the process is not registered with the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with the engine.
"""
if pid not in self._processes_per_pid:
raise KeyError(
'Process (PID: {0:d}) not registered with engine'.format(pid))
def _RegisterProcess(self, process):
"""Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process
@abc.abstractmethod
def _StartWorkerProcess(self, process_name, storage_writer):
"""Creates, starts, monitors and registers a worker process.
Args:
process_name (str): process name.
storage_writer (StorageWriter): storage writer for a session storage used
to create task storage.
Returns:
MultiProcessWorkerProcess: extraction worker process.
"""
def _StartMonitoringProcess(self, process):
"""Starts monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is not registered with the engine or
if the process is already being monitored.
IOError: if the RPC client cannot connect to the server.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
pid = process.pid
if pid in self._process_information_per_pid:
raise KeyError(
'Already monitoring process (PID: {0:d}).'.format(pid))
if pid in self._rpc_clients_per_pid:
raise KeyError(
'RPC client (PID: {0:d}) already exists'.format(pid))
rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()
# Make sure that a worker process has started its RPC server.
# The RPC port will be 0 if no server is available.
rpc_port = process.rpc_port.value
time_waited_for_process = 0.0
while not rpc_port:
time.sleep(0.1)
rpc_port = process.rpc_port.value
time_waited_for_process += 0.1
if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:
raise IOError(
'RPC client unable to determine server (PID: {0:d}) port.'.format(
pid))
hostname = 'localhost'
if not rpc_client.Open(hostname, rpc_port):
raise IOError((
'RPC client unable to connect to server (PID: {0:d}) '
'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))
self._rpc_clients_per_pid[pid] = rpc_client
self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
def _StartStatusUpdateThread(self):
"""Starts the status update thread."""
self._status_update_active = True
self._status_update_thread = threading.Thread(
name='Status update', target=self._StatusUpdateThreadMain)
self._status_update_thread.start()
@abc.abstractmethod
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
def _StopMonitoringProcess(self, process):
"""Stops monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is not monitored.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
pid = process.pid
self._RaiseIfNotMonitored(pid)
del self._process_information_per_pid[pid]
rpc_client = self._rpc_clients_per_pid.get(pid, None)
if rpc_client:
rpc_client.Close()
del self._rpc_clients_per_pid[pid]
if pid in self._rpc_errors_per_pid:
del self._rpc_errors_per_pid[pid]
logging.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(
process.name, pid))
def _StopMonitoringProcesses(self):
"""Stops monitoring all processes."""
# We need to make a copy of the list of pids since we are changing
# the dict in the loop.
for pid in list(self._process_information_per_pid.keys()):
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._StopMonitoringProcess(process)
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
self._status_update_active = False
    if self._status_update_thread.is_alive():
self._status_update_thread.join()
self._status_update_thread = None
def _TerminateProcessByPid(self, pid):
"""Terminate a process that's monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with and monitored by the
engine.
"""
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._TerminateProcess(process)
self._StopMonitoringProcess(process)
def _TerminateProcess(self, process):
"""Terminate a process.
Args:
process (MultiProcessBaseProcess): process to terminate.
"""
pid = process.pid
logging.warning('Terminating process: (PID: {0:d}).'.format(pid))
process.terminate()
# Wait for the process to exit.
process.join(timeout=self._PROCESS_JOIN_TIMEOUT)
if process.is_alive():
logging.warning('Killing process: (PID: {0:d}).'.format(pid))
self._KillProcess(pid)
@abc.abstractmethod
def _UpdateProcessingStatus(self, pid, process_status, used_memory):
"""Updates the processing status.
Args:
pid (int): process identifier (PID) of the worker process.
process_status (dict[str, object]): status values received from
the worker process.
used_memory (int): size of used memory in bytes.
Raises:
KeyError: if the process is not registered with the engine.
"""
|
run.py
|
# -*- coding: UTF-8 -*-
from gevent import monkey
monkey.patch_all()
import multiprocessing
import config
import spider
import availability
import persistence
import web
# Inter-process queues
# Proxies scraped by the spider, awaiting verification
queue_verification = multiprocessing.Queue(config.COROUTINE_NUM)
# Proxies waiting to be persisted
queue_persistence = multiprocessing.Queue()
# List of worker processes
workers = list()
# Spider
workers.append(multiprocessing.Process(target=spider.worker, args=(queue_verification,)))
# Verification
workers.append(multiprocessing.Process(target=availability.worker, args=(queue_verification, queue_persistence)))
# Persistence
workers.append(multiprocessing.Process(target=persistence.worker, args=(queue_persistence,)))
# Web API service
workers.append(multiprocessing.Process(target=web.worker))
for worker in workers:
worker.start()
for worker in workers:
worker.join()
|
rest.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" API endpoints """
import threading
from waitress import serve
from flask import Flask
from streampipes.api.resources.dummy import DummyInterimsResource
from streampipes.api.resources.processor import SepaElementResource
from streampipes.api.resources.welcome import WelcomeResource
class PipelineElementApi(object):
_FLASK_CONFIG = {
'DEBUG': False,
'DEVELOPMENT': False
}
def __init__(self):
self.app = Flask(__name__, instance_relative_config=False)
self.app.config.from_object(self._FLASK_CONFIG)
with self.app.app_context():
# register resources
SepaElementResource.register(self.app, route_base='/', route_prefix='sepa')
WelcomeResource.register(self.app, route_base='/')
# TODO: delete when finished
DummyInterimsResource.register(self.app, route_base='/')
def run(self, port: int):
print('serving API via waitress WSGI server ... http://{}:{}'.format('0.0.0.0', port))
threading.Thread(target=serve, args=(self.app,), kwargs={'host': '0.0.0.0', 'port': int(port), '_quiet': True}).start()
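# Hypothetical usage sketch (entry point assumed, not part of the original
# module): instantiate the API and serve it on an arbitrary port.
# if __name__ == '__main__':
#     PipelineElementApi().run(port=8090)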
|
parallel_cp.py
|
#!/usr/bin/env python3
# vim: tabstop=4:shiftwidth=4:smarttab:noexpandtab
# parallel_cp.py
# Copy a file (presumably from a network-mounted filesystem)
# in multiple parts simultaneously to minimize the slowing effects
# of latency or a shared connection (at the expense of increased disk IO).
#
# You can accomplish the same thing (sans progressbar) using GNU's parallel and dd.
# parallel_cp.py requires progressbar (https://pypi.python.org/pypi/progressbar),
# which can be installed with pip.
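# For reference, the parallel/dd equivalent alluded to above would run one dd
# per slice with matching skip/seek offsets and conv=notrunc so every part
# lands in the same destination file (a sketch of the idea only; this script
# instead writes per-part files and merges them, which keeps the progressbar
# simple).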
import os
import sys
import argparse
import time
from multiprocessing import Process, Pipe, active_children
from progressbar import ProgressBar, Bar, Counter, ETA, Percentage
def main():
args = get_arguments()
# Set up children, then watch their progress
source_size = os.path.getsize(args.source_file)
print("Copying %s bytes using %s children" % (source_size, args.parts))
children = spawn_children(args.source_file, args.destination_file, args.parts, source_size)
show_progress(children, source_size)
# Merge files and finish up
print("Merging copied files")
merge_files(args.destination_file, args.parts)
print("All done!")
sys.exit(0)
# Parse all arguments and return a namespace
def get_arguments(): # Parse our arguments
parser = argparse.ArgumentParser(description='Copy a file in parts in parallel.')
parser.add_argument('source_file', help="Path of source file.")
parser.add_argument('destination_file', help="Path of destination file.")
parser.add_argument('-p', '--parts', type=int, help="Number of parts to split the copy into.", default=5)
args = parser.parse_args()
# Check for a directory target and handle appropriately
if os.path.isdir(args.destination_file):
file_name = os.path.basename(args.source_file)
args.destination_file = os.path.join(args.destination_file, file_name)
return args
# Returns an array of Child objects with length <parts>
def spawn_children(source_file, destination_file, parts, source_size):
children = [] # an array of Child objects
for i in range(0,parts):
pipes = Pipe() # Create pipe, one end for the parent and one end for this child
p = Process(target=partial_copy, args=(source_file, destination_file, source_size, i, parts, pipes[0]))
p.start()
child = Child(p, pipes[1], get_copy_offsets(i, parts, source_size)[2])
children.append(child)
return children
# Creates a progress bar and updates it until all children exit
def show_progress(children, source_size):
# Set up progressbar
widgets = [Counter(), '/', str(source_size), " (", Percentage(), ") ", Bar(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=source_size)
pbar.start()
# Update progressbar until copies are all complete
while active_children():
current_sum = 0
for c in children:
c.update()
current_sum += c.bytes_copied
pbar.update(current_sum)
time.sleep(1)
pbar.finish()
# Merges all the slices into dest_file
def merge_files(dest_file, parts):
# Loop through all the files and append onto first
with open("%s.0" % dest_file, 'ab') as out_fh:
# Loop through each (other) slice, and append to the first
for i in range(1,parts):
current_file = "%s.%s" % (dest_file, i)
with open(current_file, 'rb') as in_fh:
out_fh.write(in_fh.read())
os.remove(current_file)
# Rename the first slice, and we're all done
os.rename("%s.0" % dest_file, dest_file)
# Copy a slice of a file, reporting process to parent when asked
# By default, this will copy in 1MB blocks (for status reporting)
def partial_copy(path_from, path_to, size_from, proc_num, total_procs, output, block_size=1048576):
with open(path_from, 'rb') as in_fh, open("%s.%s" % (path_to, proc_num), 'wb') as out_fh:
# Figure out what part to copy
start_pos, end_pos, read_len = get_copy_offsets(proc_num, total_procs, size_from)
in_fh.seek(start_pos)
bytes_read = 0
# Until we've copied the whole slice, keep going
while bytes_read < read_len:
# Communication with parent; any input is a request for progress
if output.poll():
output.recv() # reset poll() to False
output.send(bytes_read)
# Calculate remaining bytes, then copy
bytes_remaining = read_len - bytes_read
if bytes_remaining > block_size: # copy a full block
tmp = in_fh.read(block_size)
out_fh.write(tmp)
bytes_read += block_size
else: # copy remaining data (< 1 block)
out_fh.write(in_fh.read(bytes_remaining))
bytes_read += bytes_remaining
# Returns (start/end/length) of the slice to be copied
def get_copy_offsets(proc_num, total_procs, filesize):
getpos = lambda pnum, tprocs, fsize: int(float(pnum)/tprocs * fsize)
start_pos = getpos(proc_num, total_procs, filesize)
end_pos = getpos(proc_num+1, total_procs, filesize)
read_len = end_pos - start_pos
return (start_pos, end_pos, read_len)
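# Worked example (illustrative numbers only): for a 10-byte file split across
# 3 processes, get_copy_offsets returns (0, 3, 3), (3, 6, 3) and (6, 10, 4),
# so the slices tile the whole file without overlap and the last child absorbs
# the rounding remainder.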
# A class to wrap the concept of a child process
class Child(object):
proc = None
pipe = None
bytes_copied = 0
bytes_to_copy = 0
def __init__(self, proc, pipe, bytes_to_copy):
self.proc = proc
self.pipe = pipe
self.bytes_to_copy = bytes_to_copy
def update(self):
if not self.proc.is_alive():
self.bytes_copied = self.bytes_to_copy
else:
if self.pipe.poll():
self.bytes_copied = self.pipe.recv()
self.pipe.send('') # request another update
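# Progress handshake, as implemented above: when a child sees a pending message
# on its pipe it replies with the bytes copied so far (partial_copy), and when
# the parent sees a pending reply it records it and sends another empty request
# (Child.update).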
# Our shim to invoke main()
if __name__ == '__main__':
main()
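# Example invocation (hypothetical script name and paths):
#   python parallel_copy.py /data/big.iso /mnt/backup/ -p 8
# copies /data/big.iso to /mnt/backup/big.iso using 8 parallel slices.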
|
gcsio_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for Google Cloud Storage client."""
import errno
import logging
import multiprocessing
import os
import random
import threading
import unittest
import httplib2
import mock
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp import gcsio
from apache_beam.io.gcp.internal.clients import storage
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
class FakeGcsClient(object):
# Fake storage client. Usage in gcsio.py is client.objects.Get(...) and
# client.objects.Insert(...).
def __init__(self):
self.objects = FakeGcsObjects()
# Referenced in GcsIO.batch_copy() and GcsIO.batch_delete().
self._http = object()
class FakeFile(object):
def __init__(self, bucket, obj, contents, generation):
self.bucket = bucket
self.object = obj
self.contents = contents
self.generation = generation
def get_metadata(self):
return storage.Object(
bucket=self.bucket,
name=self.object,
generation=self.generation,
size=len(self.contents))
class FakeGcsObjects(object):
def __init__(self):
self.files = {}
# Store the last generation used for a given object name. Note that this
# has to persist even past the deletion of the object.
self.last_generation = {}
self.list_page_tokens = {}
def add_file(self, f):
self.files[(f.bucket, f.object)] = f
self.last_generation[(f.bucket, f.object)] = f.generation
def get_file(self, bucket, obj):
return self.files.get((bucket, obj), None)
def delete_file(self, bucket, obj):
del self.files[(bucket, obj)]
def get_last_generation(self, bucket, obj):
return self.last_generation.get((bucket, obj), 0)
def Get(self, get_request, download=None): # pylint: disable=invalid-name
f = self.get_file(get_request.bucket, get_request.object)
if f is None:
# Failing with a HTTP 404 if file does not exist.
raise HttpError({'status': 404}, None, None)
if download is None:
return f.get_metadata()
else:
stream = download.stream
def get_range_callback(start, end):
assert start >= 0 and end >= start and end < len(f.contents)
stream.write(f.contents[start:end + 1])
download.GetRange = get_range_callback
def Insert(self, insert_request, upload=None): # pylint: disable=invalid-name
assert upload is not None
generation = self.get_last_generation(insert_request.bucket,
insert_request.name) + 1
f = FakeFile(insert_request.bucket, insert_request.name, '', generation)
# Stream data into file.
stream = upload.stream
data_list = []
while True:
data = stream.read(1024 * 1024)
if not data:
break
data_list.append(data)
f.contents = ''.join(data_list)
self.add_file(f)
def Copy(self, copy_request): # pylint: disable=invalid-name
src_file = self.get_file(copy_request.sourceBucket,
copy_request.sourceObject)
if not src_file:
raise HttpError(
httplib2.Response({'status': '404'}), '404 Not Found',
'https://fake/url')
generation = self.get_last_generation(copy_request.destinationBucket,
copy_request.destinationObject) + 1
dest_file = FakeFile(copy_request.destinationBucket,
copy_request.destinationObject, src_file.contents,
generation)
self.add_file(dest_file)
def Delete(self, delete_request): # pylint: disable=invalid-name
# Here, we emulate the behavior of the GCS service in raising a 404 error
# if this object already exists.
if self.get_file(delete_request.bucket, delete_request.object):
self.delete_file(delete_request.bucket, delete_request.object)
else:
raise HttpError(
httplib2.Response({'status': '404'}), '404 Not Found',
'https://fake/url')
def List(self, list_request): # pylint: disable=invalid-name
bucket = list_request.bucket
prefix = list_request.prefix or ''
matching_files = []
for file_bucket, file_name in sorted(iter(self.files)):
if bucket == file_bucket and file_name.startswith(prefix):
file_object = self.files[(file_bucket, file_name)].get_metadata()
matching_files.append(file_object)
# Handle pagination.
items_per_page = 5
if not list_request.pageToken:
range_start = 0
else:
if list_request.pageToken not in self.list_page_tokens:
raise ValueError('Invalid page token.')
range_start = self.list_page_tokens[list_request.pageToken]
del self.list_page_tokens[list_request.pageToken]
result = storage.Objects(
items=matching_files[range_start:range_start + items_per_page])
if range_start + items_per_page < len(matching_files):
next_range_start = range_start + items_per_page
next_page_token = '_page_token_%s_%s_%d' % (bucket, prefix,
next_range_start)
self.list_page_tokens[next_page_token] = next_range_start
result.nextPageToken = next_page_token
return result
class FakeApiCall(object):
def __init__(self, exception):
self.exception = exception
self.is_error = exception is not None
class FakeBatchApiRequest(object):
def __init__(self, **unused_kwargs):
self.operations = []
def Add(self, service, method, request): # pylint: disable=invalid-name
self.operations.append((service, method, request))
def Execute(self, unused_http, **unused_kwargs): # pylint: disable=invalid-name
api_calls = []
for service, method, request in self.operations:
exception = None
try:
getattr(service, method)(request)
except Exception as e: # pylint: disable=broad-except
exception = e
api_calls.append(FakeApiCall(exception))
return api_calls
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestGCSPathParser(unittest.TestCase):
def test_gcs_path(self):
self.assertEqual(
gcsio.parse_gcs_path('gs://bucket/name'), ('bucket', 'name'))
self.assertEqual(
gcsio.parse_gcs_path('gs://bucket/name/sub'), ('bucket', 'name/sub'))
def test_bad_gcs_path(self):
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://')
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://bucket')
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://bucket/')
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs:///name')
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs:///')
self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs:/blah/bucket/name')
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestGCSIO(unittest.TestCase):
def _insert_random_file(self, client, path, size, generation=1):
bucket, name = gcsio.parse_gcs_path(path)
f = FakeFile(bucket, name, os.urandom(size), generation)
client.objects.add_file(f)
return f
def setUp(self):
self.client = FakeGcsClient()
self.gcs = gcsio.GcsIO(self.client)
def test_exists(self):
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
self.assertFalse(self.gcs.exists(file_name + 'xyz'))
self.assertTrue(self.gcs.exists(file_name))
@mock.patch.object(FakeGcsObjects, 'Get')
def test_exists_failure(self, mock_get):
# Raising an error other than 404. Raising 404 is a valid failure for
# exists() call.
mock_get.side_effect = HttpError({'status': 400}, None, None)
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
with self.assertRaises(HttpError) as cm:
self.gcs.exists(file_name)
    self.assertEqual(400, cm.exception.status_code)
def test_size(self):
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
self.assertTrue(self.gcs.exists(file_name))
self.assertEqual(1234, self.gcs.size(file_name))
def test_file_mode(self):
file_name = 'gs://gcsio-test/dummy_mode_file'
with self.gcs.open(file_name, 'wb') as f:
assert f.mode == 'wb'
with self.gcs.open(file_name, 'rb') as f:
assert f.mode == 'rb'
def test_bad_file_modes(self):
file_name = 'gs://gcsio-test/dummy_mode_file'
with self.assertRaises(ValueError):
self.gcs.open(file_name, 'w+')
with self.assertRaises(ValueError):
self.gcs.open(file_name, 'r+b')
def test_empty_batches(self):
self.assertEqual([], self.gcs.copy_batch([]))
self.assertEqual([], self.gcs.delete_batch([]))
def test_delete(self):
file_name = 'gs://gcsio-test/delete_me'
file_size = 1024
# Test deletion of non-existent file.
self.gcs.delete(file_name)
self._insert_random_file(self.client, file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(file_name) in self.client.objects.files)
self.gcs.delete(file_name)
self.assertFalse(
gcsio.parse_gcs_path(file_name) in self.client.objects.files)
@mock.patch('apache_beam.io.gcp.gcsio.BatchApiRequest')
def test_delete_batch(self, *unused_args):
gcsio.BatchApiRequest = FakeBatchApiRequest
file_name_pattern = 'gs://gcsio-test/delete_me_%d'
file_size = 1024
num_files = 10
# Test deletion of non-existent files.
result = self.gcs.delete_batch(
[file_name_pattern % i for i in range(num_files)])
self.assertTrue(result)
for i, (file_name, exception) in enumerate(result):
self.assertEqual(file_name, file_name_pattern % i)
self.assertEqual(exception, None)
self.assertFalse(self.gcs.exists(file_name_pattern % i))
# Insert some files.
for i in range(num_files):
self._insert_random_file(self.client, file_name_pattern % i, file_size)
# Check files inserted properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(file_name_pattern % i))
# Execute batch delete.
self.gcs.delete_batch([file_name_pattern % i for i in range(num_files)])
# Check files deleted properly.
for i in range(num_files):
self.assertFalse(self.gcs.exists(file_name_pattern % i))
def test_copy(self):
src_file_name = 'gs://gcsio-test/source'
dest_file_name = 'gs://gcsio-test/dest'
file_size = 1024
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.copy(src_file_name, dest_file_name)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.assertRaises(IOError, self.gcs.copy, 'gs://gcsio-test/non-existent',
'gs://gcsio-test/non-existent-destination')
@mock.patch('apache_beam.io.gcp.gcsio.BatchApiRequest')
def test_copy_batch(self, *unused_args):
gcsio.BatchApiRequest = FakeBatchApiRequest
from_name_pattern = 'gs://gcsio-test/copy_me_%d'
to_name_pattern = 'gs://gcsio-test/destination_%d'
file_size = 1024
num_files = 10
# Test copy of non-existent files.
result = self.gcs.copy_batch(
[(from_name_pattern % i, to_name_pattern % i)
for i in range(num_files)])
self.assertTrue(result)
for i, (src, dest, exception) in enumerate(result):
self.assertEqual(src, from_name_pattern % i)
self.assertEqual(dest, to_name_pattern % i)
self.assertTrue(isinstance(exception, IOError))
self.assertEqual(exception.errno, errno.ENOENT)
self.assertFalse(self.gcs.exists(from_name_pattern % i))
self.assertFalse(self.gcs.exists(to_name_pattern % i))
# Insert some files.
for i in range(num_files):
self._insert_random_file(self.client, from_name_pattern % i, file_size)
# Check files inserted properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(from_name_pattern % i))
# Execute batch copy.
self.gcs.copy_batch([(from_name_pattern % i, to_name_pattern % i)
for i in range(num_files)])
# Check files copied properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(from_name_pattern % i))
self.assertTrue(self.gcs.exists(to_name_pattern % i))
def test_copytree(self):
src_dir_name = 'gs://gcsio-test/source/'
dest_dir_name = 'gs://gcsio-test/dest/'
file_size = 1024
paths = ['a', 'b/c', 'b/d']
for path in paths:
src_file_name = src_dir_name + path
dest_file_name = dest_dir_name + path
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.copytree(src_dir_name, dest_dir_name)
for path in paths:
src_file_name = src_dir_name + path
dest_file_name = dest_dir_name + path
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
def test_rename(self):
src_file_name = 'gs://gcsio-test/source'
dest_file_name = 'gs://gcsio-test/dest'
file_size = 1024
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.rename(src_file_name, dest_file_name)
self.assertFalse(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
def test_full_file_read(self):
file_name = 'gs://gcsio-test/full_file'
file_size = 5 * 1024 * 1024 + 100
random_file = self._insert_random_file(self.client, file_name, file_size)
f = self.gcs.open(file_name)
self.assertEqual(f.mode, 'r')
f.seek(0, os.SEEK_END)
self.assertEqual(f.tell(), file_size)
self.assertEqual(f.read(), '')
f.seek(0)
self.assertEqual(f.read(), random_file.contents)
def test_file_random_seek(self):
file_name = 'gs://gcsio-test/seek_file'
file_size = 5 * 1024 * 1024 - 100
random_file = self._insert_random_file(self.client, file_name, file_size)
f = self.gcs.open(file_name)
random.seed(0)
for _ in range(0, 10):
a = random.randint(0, file_size - 1)
b = random.randint(0, file_size - 1)
start, end = min(a, b), max(a, b)
f.seek(start)
self.assertEqual(f.tell(), start)
self.assertEqual(
f.read(end - start + 1), random_file.contents[start:end + 1])
self.assertEqual(f.tell(), end + 1)
def test_file_iterator(self):
file_name = 'gs://gcsio-test/iterating_file'
lines = []
line_count = 10
for _ in range(line_count):
line_length = random.randint(100, 500)
line = os.urandom(line_length).replace('\n', ' ') + '\n'
lines.append(line)
contents = ''.join(lines)
bucket, name = gcsio.parse_gcs_path(file_name)
self.client.objects.add_file(FakeFile(bucket, name, contents, 1))
f = self.gcs.open(file_name)
read_lines = 0
for line in f:
read_lines += 1
self.assertEqual(read_lines, line_count)
def test_file_read_line(self):
file_name = 'gs://gcsio-test/read_line_file'
lines = []
# Set a small buffer size to exercise refilling the buffer.
# First line is carefully crafted so the newline falls as the last character
# of the buffer to exercise this code path.
read_buffer_size = 1024
lines.append('x' * 1023 + '\n')
for _ in range(1, 1000):
line_length = random.randint(100, 500)
line = os.urandom(line_length).replace('\n', ' ') + '\n'
lines.append(line)
contents = ''.join(lines)
file_size = len(contents)
bucket, name = gcsio.parse_gcs_path(file_name)
self.client.objects.add_file(FakeFile(bucket, name, contents, 1))
f = self.gcs.open(file_name, read_buffer_size=read_buffer_size)
# Test read of first two lines.
f.seek(0)
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.tell(), len(lines[0]))
self.assertEqual(f.readline(), lines[1])
# Test read at line boundary.
f.seek(file_size - len(lines[-1]) - 1)
self.assertEqual(f.readline(), '\n')
# Test read at end of file.
f.seek(file_size)
self.assertEqual(f.readline(), '')
# Test reads at random positions.
random.seed(0)
for _ in range(0, 10):
start = random.randint(0, file_size - 1)
line_index = 0
# Find line corresponding to start index.
chars_left = start
while True:
next_line_length = len(lines[line_index])
if chars_left - next_line_length < 0:
break
chars_left -= next_line_length
line_index += 1
f.seek(start)
self.assertEqual(f.readline(), lines[line_index][chars_left:])
def test_file_write(self):
file_name = 'gs://gcsio-test/write_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents[0:1000])
f.write(contents[1000:1024 * 1024])
f.write(contents[1024 * 1024:])
f.close()
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_file_close(self):
file_name = 'gs://gcsio-test/close_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents)
f.close()
f.close() # This should not crash.
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_file_flush(self):
file_name = 'gs://gcsio-test/flush_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
bucket, name = gcsio.parse_gcs_path(file_name)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents[0:1000])
f.flush()
f.write(contents[1000:1024 * 1024])
f.flush()
f.flush() # Should be a NOOP.
f.write(contents[1024 * 1024:])
f.close() # This should already call the equivalent of flush() in its body.
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_context_manager(self):
# Test writing with a context manager.
file_name = 'gs://gcsio-test/context_manager_file'
file_size = 1024
contents = os.urandom(file_size)
with self.gcs.open(file_name, 'w') as f:
f.write(contents)
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
# Test reading with a context manager.
with self.gcs.open(file_name) as f:
self.assertEqual(f.read(), contents)
# Test that exceptions are not swallowed by the context manager.
with self.assertRaises(ZeroDivisionError):
with self.gcs.open(file_name) as f:
f.read(0 / 0)
def test_glob(self):
bucket_name = 'gcsio-test'
object_names = [
'cow/cat/fish',
'cow/cat/blubber',
'cow/dog/blubber',
'apple/dog/blubber',
'apple/fish/blubber',
'apple/fish/blowfish',
'apple/fish/bambi',
'apple/fish/balloon',
'apple/fish/cat',
'apple/fish/cart',
'apple/fish/carl',
'apple/fish/handle',
'apple/dish/bat',
'apple/dish/cat',
'apple/dish/carl',
]
for object_name in object_names:
file_name = 'gs://%s/%s' % (bucket_name, object_name)
self._insert_random_file(self.client, file_name, 0)
test_cases = [
('gs://gcsio-test/*', [
'cow/cat/fish',
'cow/cat/blubber',
'cow/dog/blubber',
'apple/dog/blubber',
'apple/fish/blubber',
'apple/fish/blowfish',
'apple/fish/bambi',
'apple/fish/balloon',
'apple/fish/cat',
'apple/fish/cart',
'apple/fish/carl',
'apple/fish/handle',
'apple/dish/bat',
'apple/dish/cat',
'apple/dish/carl',
]),
('gs://gcsio-test/cow/*', [
'cow/cat/fish',
'cow/cat/blubber',
'cow/dog/blubber',
]),
('gs://gcsio-test/cow/ca*', [
'cow/cat/fish',
'cow/cat/blubber',
]),
('gs://gcsio-test/apple/[df]ish/ca*', [
'apple/fish/cat',
'apple/fish/cart',
'apple/fish/carl',
'apple/dish/cat',
'apple/dish/carl',
]),
('gs://gcsio-test/apple/fish/car?', [
'apple/fish/cart',
'apple/fish/carl',
]),
('gs://gcsio-test/apple/fish/b*', [
'apple/fish/blubber',
'apple/fish/blowfish',
'apple/fish/bambi',
'apple/fish/balloon',
]),
('gs://gcsio-test/apple/f*/b*', [
'apple/fish/blubber',
'apple/fish/blowfish',
'apple/fish/bambi',
'apple/fish/balloon',
]),
('gs://gcsio-test/apple/dish/[cb]at', [
'apple/dish/bat',
'apple/dish/cat',
]),
]
for file_pattern, expected_object_names in test_cases:
expected_file_names = ['gs://%s/%s' % (bucket_name, o)
for o in expected_object_names]
self.assertEqual(
set(self.gcs.glob(file_pattern)), set(expected_file_names))
# Check if limits are followed correctly
limit = 3
for file_pattern, expected_object_names in test_cases:
expected_num_items = min(len(expected_object_names), limit)
self.assertEqual(
len(self.gcs.glob(file_pattern, limit)), expected_num_items)
def test_size_of_files_in_glob(self):
bucket_name = 'gcsio-test'
object_names = [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
('apple/dog/blubber', 5),
('apple/fish/blubber', 6),
('apple/fish/blowfish', 7),
('apple/fish/bambi', 8),
('apple/fish/balloon', 9),
('apple/fish/cat', 10),
('apple/fish/cart', 11),
('apple/fish/carl', 12),
('apple/dish/bat', 13),
('apple/dish/cat', 14),
('apple/dish/carl', 15),
('apple/fish/handle', 16),
]
for (object_name, size) in object_names:
file_name = 'gs://%s/%s' % (bucket_name, object_name)
self._insert_random_file(self.client, file_name, size)
test_cases = [
('gs://gcsio-test/cow/*', [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
]),
('gs://gcsio-test/apple/fish/car?', [
('apple/fish/cart', 11),
('apple/fish/carl', 12),
]),
('gs://gcsio-test/*/f*/car?', [
('apple/fish/cart', 11),
('apple/fish/carl', 12),
]),
]
for file_pattern, expected_object_names in test_cases:
expected_file_sizes = {'gs://%s/%s' % (bucket_name, o): s
for (o, s) in expected_object_names}
self.assertEqual(
self.gcs.size_of_files_in_glob(file_pattern), expected_file_sizes)
# Check if limits are followed correctly
limit = 1
for file_pattern, expected_object_names in test_cases:
expected_num_items = min(len(expected_object_names), limit)
self.assertEqual(
len(self.gcs.glob(file_pattern, limit)), expected_num_items)
def test_size_of_files_in_glob_limited(self):
bucket_name = 'gcsio-test'
object_names = [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
('apple/dog/blubber', 5),
('apple/fish/blubber', 6),
('apple/fish/blowfish', 7),
('apple/fish/bambi', 8),
('apple/fish/balloon', 9),
('apple/fish/cat', 10),
('apple/fish/cart', 11),
('apple/fish/carl', 12),
('apple/dish/bat', 13),
('apple/dish/cat', 14),
('apple/dish/carl', 15),
]
for (object_name, size) in object_names:
file_name = 'gs://%s/%s' % (bucket_name, object_name)
self._insert_random_file(self.client, file_name, size)
test_cases = [
('gs://gcsio-test/cow/*', [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
]),
('gs://gcsio-test/apple/fish/car?', [
('apple/fish/cart', 11),
('apple/fish/carl', 12),
])
]
# Check if limits are followed correctly
limit = 1
for file_pattern, expected_object_names in test_cases:
expected_num_items = min(len(expected_object_names), limit)
self.assertEqual(
len(self.gcs.glob(file_pattern, limit)), expected_num_items)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestPipeStream(unittest.TestCase):
def _read_and_verify(self, stream, expected, buffer_size):
data_list = []
bytes_read = 0
seen_last_block = False
while True:
data = stream.read(buffer_size)
self.assertLessEqual(len(data), buffer_size)
if len(data) < buffer_size:
# Test the constraint that the pipe stream returns less than the buffer
# size only when at the end of the stream.
if data:
self.assertFalse(seen_last_block)
seen_last_block = True
if not data:
break
data_list.append(data)
bytes_read += len(data)
self.assertEqual(stream.tell(), bytes_read)
self.assertEqual(''.join(data_list), expected)
def test_pipe_stream(self):
block_sizes = list(4**i for i in range(0, 12))
data_blocks = list(os.urandom(size) for size in block_sizes)
expected = ''.join(data_blocks)
buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
for buffer_size in buffer_sizes:
parent_conn, child_conn = multiprocessing.Pipe()
stream = gcsio.GcsBufferedWriter.PipeStream(child_conn)
child_thread = threading.Thread(
target=self._read_and_verify, args=(stream, expected, buffer_size))
child_thread.start()
for data in data_blocks:
parent_conn.send_bytes(data)
parent_conn.close()
child_thread.join()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
cursor_wrapper.py
|
import logging
from threading import Thread
from time import time
from django.db.backends.utils import CursorWrapper
from .log_object import SqlLogObject, settings
log = logging.getLogger('dl_logger')
class CursorLogWrapper(CursorWrapper):
def execute(self, sql, params=None):
return self.log_query(super(CursorLogWrapper, self).execute, sql, params)
def executemany(self, sql, param_list):
return self.log_query(super(CursorLogWrapper, self).executemany, sql, param_list)
def log_query(self, method, *args):
start = time()
try:
return method(*args)
finally:
stop = time()
duration = stop - start
def do_log(cursor, *log_args):
if duration < settings.SQL_THRESHOLD:
return
sql = self.db.ops.last_executed_query(cursor, *log_args)
sql_info = {
'sql': sql,
'time': "%.3f" % duration
}
self.db.queries_log.append(sql_info)
record = SqlLogObject(sql_info)
log.info(record)
Thread(target=do_log, args=(self.cursor, *args)).start()
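# Usage sketch (assumption: a project-specific Django database backend; the
# exact wiring is not shown in this file). Django's BaseDatabaseWrapper exposes
# make_cursor()/make_debug_cursor() hooks that wrap raw cursors, so one way to
# activate this logger is to override one of them in a custom backend, e.g.:
#
#   class DatabaseWrapper(postgresql_base.DatabaseWrapper):
#       def make_cursor(self, cursor):
#           return CursorLogWrapper(cursor, self)
#
# where `postgresql_base` would be django.db.backends.postgresql.base.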
|
BaseSpider.py
|
import datetime
from src.threadPool.ImageThreadPool import ImageThreadPool
from src.util import util
from copy import deepcopy
import json
from src.util.constant import BASE_DIR, EXPIRE_TIME_IN_SECONDS, BASE_PATH, QR_CODE_MAP_KEY
import re
import logging
from src.web.entity.UserInfo import UserInfo
from src.web.web_util.web_util import get_redis_conn
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import threading
import random
class BaseSpider(object):
"""
    Base class that initializes the tools and helpers shared by the crawlers.
"""
def __init__(self, use_redis=False, debug=False, mood_begin=0, mood_num=-1, stop_time='-1',
download_small_image=False, download_big_image=False,
download_mood_detail=True, download_like_detail=True, download_like_names=True, recover=False,
cookie_text=None, from_web=False, username='', nickname='', no_delete=True, pool_flag='127.0.0.1', from_client=False, get_visit=False):
        # Initialize the download options
self.mood_begin = mood_begin
self.mood_num = mood_num
self.recover = recover
self.download_small_image = download_small_image
self.download_big_image = download_big_image
self.download_mood_detail = download_mood_detail
self.download_like_detail = download_like_detail
self.download_like_names = download_like_names
        # Controls the number of threads for both fetching moods and fetching friend data; the default of 10 means each of the two sub-tasks runs 10 threads
self.thread_num = 10
self.thread_list = []
self.from_client = from_client
self.no_delete = no_delete
if stop_time != '-1':
self.stop_time = util.get_mktime(stop_time)
else:
self.stop_time = -1
self.begin_time = datetime.datetime.now()
self.host = 'https://user.qzone.qq.com'
self.h5_host = 'h5.qzone.qq.com'
self.http_host = 'http://user.qzone.qq.com'
self.use_redis = use_redis
self.debug = debug
self.cookie_text = cookie_text
self.pool_flag = pool_flag
self.from_web = from_web
self.random_qr_name = str(random.random())
self.get_visit = get_visit
self.QR_CODE_PATH = BASE_PATH + '/src/web/static/image/qr' + self.random_qr_name
self.headers = {
'host': 'user.qzone.qq.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.8',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:66.0) Gecko/20100101 Firefox/66.0',
'connection': 'keep-alive'
}
self.h5_headers = deepcopy(self.headers)
self.h5_headers['host'] = self.h5_host
self.visit_list = []
if use_redis:
self.re = self.connect_redis()
if not from_web and not from_client:
self.username, self.password, self.nickname = self.get_username_password()
else:
self.username = username
self.nickname = nickname
        # Save the user's QR-code name so it can be passed to the front end
if self.use_redis:
self.re.hset(QR_CODE_MAP_KEY, self.username, self.random_qr_name)
self.init_user_info()
self.image_thread_pool = ImageThreadPool(20)
def init_user_info(self):
self.init_file_name()
self.mood_host = self.http_host + '/' + self.username + '/mood/'
        # When crawling a friend's moods, username is switched to the friend's QQ number, so keep a backup here
self.raw_username = deepcopy(self.username)
self.raw_nickname = deepcopy(self.nickname)
self.user_info = UserInfo(self.username).load()
if self.user_info is None:
self.user_info = UserInfo(self.username)
self.user_info.QQ = self.username
self.user_info.nickname = self.nickname
def get_username_password(self):
config_path = BASE_DIR + 'config/userinfo.json'
try:
with open(config_path, 'r', encoding='utf-8') as r:
userinfo = json.load(r)
return userinfo['username'], userinfo['password'], userinfo['nick_name']
except:
print("Error: File Not Found==============")
print("请检查配置文件是否正确配置!!!!")
print("Please check config file")
print("Path:", config_path)
exit(1)
    # Convert the JSONP-style response string into plain JSON
def get_json(self, str1):
arr = re.findall(r'[^()]+', str1)
        # Keep only the text between the outer parentheses, dropping the
        # callback name and the trailing ';'.
        json_str = "".join(arr[1:-1])
        return json_str.strip()
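    # Illustrative example (hypothetical payload): for a JSONP-style response
    # such as  _Callback({"code": 0, "data": []});  get_json returns the
    # substring '{"code": 0, "data": []}'.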
    # Recover data from local storage (used to resume after the crawler was interrupted unexpectedly)
def do_recover_from_exist_data(self):
if self.use_redis:
try:
self.content = json.loads(self.re.get(self.CONTENT_FILE_NAME))
self.like_list_names = json.loads(self.re.get(self.LIKE_LIST_NAME_FILE_NAME))
self.mood_details = json.loads(self.re.get(self.MOOD_DETAIL_FILE_NAME))
self.like_detail = json.loads(self.re.get(self.LIKE_DETAIL_FILE_NAME))
if self.debug:
print('Finish to recover data from redis:')
print('content:', len(self.content))
print('like_list_names:', len(self.like_list_names))
print('mood_details:', len(self.mood_details))
print('like_detail:', len(self.like_detail))
return len(self.like_list_names)
except BaseException as e:
self.format_error(e, 'Failed to recover data from redis')
print('Now, try to recover data from json files...')
self.load_all_data_from_json()
else:
self.load_all_data_from_json()
def format_error(self, e, msg=""):
if not self.from_client:
print('ERROR===================')
print(e)
print(msg)
logging.error(e)
logging.error(msg)
print('ERROR===================')
if self.debug:
# raise e
pass
def logging_info(self, info):
logging.info(info)
def init_parameter(self):
self.mood_count = 0
self.like_detail = []
self.like_list_names = []
self.content = []
self.unikeys = []
self.tid = ""
self.mood_details = []
self.error_like_detail_unikeys = []
self.error_like_list_unikeys = []
self.error_mood_unikeys = []
self.error_like_detail = {}
self.error_like_list = {}
self.error_mood = {}
self.until_stop_time = True
def init_file_name(self):
"""
        Initialize all file and directory names used by this spider.
:return:
"""
self.USER_BASE_DIR = BASE_DIR + self.username + '/'
logging_dir = self.USER_BASE_DIR + 'log/'
if self.debug:
print("logging_dir:", logging_dir)
util.check_dir_exist(logging_dir)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=logging_dir + self.username + '.log',
filemode='w+')
logging.info('file_name_head:' + self.username)
DATA_DIR_HEAD = self.USER_BASE_DIR + 'data/'
self.CONTENT_FILE_NAME = DATA_DIR_HEAD + 'QQ_content.json'
self.LIKE_DETAIL_FILE_NAME = DATA_DIR_HEAD + 'QQ_like_detail' + '.json'
self.LIKE_LIST_NAME_FILE_NAME = DATA_DIR_HEAD + 'QQ_like_list_name' + '.json'
self.MOOD_DETAIL_FILE_NAME = DATA_DIR_HEAD + 'QQ_mood_detail' + '.json'
ERROR_DIR_HEAD = self.USER_BASE_DIR + 'error/'
self.ERROR_LIKE_DETAIL_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_detail_error' + '.json'
self.ERROR_LIKE_LIST_NAME_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_list_name_error' + '.json'
self.ERROR_MOOD_DETAIL_FILE_NAME = ERROR_DIR_HEAD + 'QQ_mood_detail_error' + '.json'
self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_detail_error_unikey' + '.txt'
self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_list_error_unikey' + '.txt'
self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME = ERROR_DIR_HEAD + 'QQ_mood_detail_error_unikey' + '.txt'
self.SMALL_IMAGE_DIR = self.USER_BASE_DIR + 'qq_image/'
self.BIG_IMAGE_DIR = self.USER_BASE_DIR + 'qq_big_image/'
util.check_dir_exist(DATA_DIR_HEAD)
util.check_dir_exist(ERROR_DIR_HEAD)
util.check_dir_exist(self.SMALL_IMAGE_DIR)
util.check_dir_exist(self.BIG_IMAGE_DIR)
USER_BASE_DIR = BASE_DIR + self.username + '/'
util.check_dir_exist(USER_BASE_DIR)
FRIEND_DIR_HEAD = USER_BASE_DIR + 'friend/'
self.FRIEND_LIST_FILE_NAME = FRIEND_DIR_HEAD + 'friend_list.json'
self.FRIEND_DETAIL_FILE_NAME = FRIEND_DIR_HEAD + 'friend_detail.json'
self.FRIEND_DETAIL_LIST_FILE_NAME = FRIEND_DIR_HEAD + 'friend_detail_list.csv'
self.FRIEND_DETAIL_EXCEL_FILE_NAME = FRIEND_DIR_HEAD + 'friend_detail_list.xlsx'
        # Avatars are downloaded into the web static folder so the web UI can reference them
self.FRIEND_HEADER_IMAGE_PATH = BASE_PATH + '/src/web/static/image/' + self.username + '/header/'
self.web_image_bash_path = BASE_PATH + '/src/web/static/image/'+ self.username + '/'
util.check_dir_exist(USER_BASE_DIR + 'friend/')
util.check_dir_exist(self.FRIEND_HEADER_IMAGE_PATH)
self.init_analysis_path()
if self.debug:
print("Init file Name Finish:", self.USER_BASE_DIR)
def init_analysis_path(self):
self.friend_dir = BASE_DIR + self.username + '/friend/' + 'friend_detail_list.csv'
self.history_like_agree_file_name = BASE_DIR + self.username + '/friend/' + 'history_like_list.json'
RESULT_BASE_DIR = self.USER_BASE_DIR + "data/result/"
self.MOOD_DATA_FILE_NAME = RESULT_BASE_DIR + 'mood_data.csv'
self.MOOD_DATA_EXCEL_FILE_NAME = RESULT_BASE_DIR + 'mood_data.xlsx'
LABEL_BASE_DIR = self.USER_BASE_DIR + "data/label/"
self.LABEL_FILE_CSV = LABEL_BASE_DIR + 'label_data.csv'
self.LABEL_FILE_EXCEL = LABEL_BASE_DIR + 'label_data.xlsx'
self.label_path = self.USER_BASE_DIR + 'data/label/'
self.image_path = self.USER_BASE_DIR + 'image/'
util.check_dir_exist(RESULT_BASE_DIR)
util.check_dir_exist(LABEL_BASE_DIR)
util.check_dir_exist(self.label_path)
util.check_dir_exist(self.image_path)
def load_all_data_from_json(self):
self.content = self.load_data_from_json(self.CONTENT_FILE_NAME)
self.like_list_names = self.load_data_from_json(self.LIKE_LIST_NAME_FILE_NAME)
self.mood_details = self.load_data_from_json(self.MOOD_DETAIL_FILE_NAME)
self.like_detail = self.load_data_from_json(self.LIKE_DETAIL_FILE_NAME)
print("Success to Load Data From Json")
def load_data_from_json(self, file_name):
try:
with open(file_name, encoding='utf-8') as content:
data = json.load(content)
return data
except BaseException as e:
self.format_error(e, 'Failed to load data ' + file_name)
def delete_cache(self):
self.re.delete(self.LIKE_LIST_NAME_FILE_NAME)
self.re.delete(self.MOOD_DETAIL_FILE_NAME)
self.re.delete(self.LIKE_DETAIL_FILE_NAME)
def save_data_to_redis(self, final_result=False):
"""
        Save data to redis.
        :param final_result: whether this is the final result; if True, error information is saved as well, otherwise the data is only cached
:return:
"""
try:
if self.use_redis:
self.re.set(self.CONTENT_FILE_NAME, json.dumps(self.content, ensure_ascii=False))
if self.download_like_names:
self.re.set(self.LIKE_LIST_NAME_FILE_NAME,
json.dumps(self.like_list_names, ensure_ascii=False))
if self.download_mood_detail:
self.re.set(self.MOOD_DETAIL_FILE_NAME,
json.dumps(self.mood_details, ensure_ascii=False))
if self.download_like_detail:
self.re.set(self.LIKE_DETAIL_FILE_NAME,
json.dumps(self.like_detail, ensure_ascii=False))
if not self.no_delete:
self.re.expire(self.LIKE_LIST_NAME_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.MOOD_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.LIKE_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
if final_result:
if self.download_like_detail:
self.re.set(self.ERROR_LIKE_DETAIL_FILE_NAME,
json.dumps(self.error_like_detail, ensure_ascii=False))
self.re.set(self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME, "==".join(self.error_like_detail_unikeys))
if self.download_like_names:
self.re.set(self.ERROR_LIKE_LIST_NAME_FILE_NAME,
json.dumps(self.error_like_list, ensure_ascii=False))
self.re.set(self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME, "==".join(self.error_like_list_unikeys))
if self.download_mood_detail:
self.re.set(self.ERROR_MOOD_DETAIL_FILE_NAME,
json.dumps(self.error_mood, ensure_ascii=False))
self.re.set(self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME, "==".join(self.error_mood_unikeys))
if not self.no_delete:
self.re.expire(self.ERROR_LIKE_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_LIKE_LIST_NAME_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_MOOD_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
except BaseException as e:
            self.format_error(e, 'Failed to save data to redis')
def save_data_to_json(self, data, file_name):
try:
with open(file_name, 'w', encoding='utf-8') as w2:
json.dump(data, w2, ensure_ascii=False)
except BaseException as e:
self.format_error(e, 'Failed to save file:' + file_name)
def save_data_to_txt(self, data, file_name):
try:
with open(file_name, 'w', encoding='utf-8') as w:
w.write(";".join(data))
except BaseException as e:
self.format_error(e, 'Failed to save file:' + file_name)
def save_all_data_to_json(self):
self.save_data_to_json(data=self.content, file_name=self.CONTENT_FILE_NAME)
if self.download_mood_detail:
self.save_data_to_json(data=self.mood_details, file_name=self.MOOD_DETAIL_FILE_NAME)
self.save_data_to_json(data=self.error_mood, file_name=self.ERROR_MOOD_DETAIL_FILE_NAME)
self.save_data_to_txt(data=self.error_mood_unikeys, file_name=self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME)
if self.download_like_names:
self.save_data_to_json(data=self.like_detail, file_name=self.LIKE_DETAIL_FILE_NAME)
self.save_data_to_json(data=self.error_like_detail, file_name=self.ERROR_LIKE_DETAIL_FILE_NAME)
            self.save_data_to_txt(data=self.error_like_detail_unikeys, file_name=self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME)
if self.download_like_detail:
self.save_data_to_json(data=self.like_list_names, file_name=self.LIKE_LIST_NAME_FILE_NAME)
self.save_data_to_json(data=self.error_like_list, file_name=self.ERROR_LIKE_LIST_NAME_FILE_NAME)
self.save_data_to_txt(data=self.error_like_list_unikeys,
file_name=self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME)
self.save_data_to_redis(final_result=True)
def connect_redis(self):
conn = get_redis_conn(self.pool_flag)
if conn is None:
print("连接数据库失败")
exit(1)
else:
return conn
def check_time(self, mood, stop_time, until_stop_time=True):
create_time = mood['created_time']
if self.debug:
print('time:', create_time, stop_time)
if stop_time >= create_time:
until_stop_time = False
            print('Reached the configured stop time; the crawler is about to exit')
return until_stop_time
else:
return until_stop_time
def check_comment_num(self, mood):
cmt_num = mood['cmtnum']
if cmt_num > 20:
return cmt_num
else:
return -1
def download_image(self, url, name):
image_url = url
try:
r = self.req.get(url=image_url, headers=self.headers, timeout=20)
image_content = r.content
            # Save the image asynchronously to improve throughput
# t = threading.Thread(target=self.save_image_concurrent, args=(image_content, name))
# t.start()
thread = self.image_thread_pool.get_thread()
t = thread(target=self.save_image_concurrent, args=(image_content, name))
t.start()
# t = self.image_thread_pool2.submit(self.save_image_concurrent, (image_content, name))
except BaseException as e:
self.format_error(e, 'Failed to download image:' + name)
def save_image_concurrent(self, image, name):
try:
file_image = open(name + '.jpg', 'wb+')
file_image.write(image)
file_image.close()
self.image_thread_pool.add_thread()
except BaseException as e:
self.format_error(e, "Failed to save image:" + name)
def save_image_single(self, image, name):
try:
file_image = open(name + '.jpg', 'wb+')
file_image.write(image)
file_image.close()
except BaseException as e:
self.format_error(e, "Failed to save image:" + name)
def show_image(self, file_path):
t = threading.Thread(target=self.do_show_image, args=(file_path,))
t.start()
def do_show_image(self, file_path):
image = mpimg.imread(file_path)
plt.imshow(image)
plt.axis('off')
plt.show()
def result_report(self):
print("#######################")
        print('Crawled user:', self.username)
        print('Total time:', (datetime.datetime.now() - self.begin_time).seconds / 60, 'minutes')
        print('Number of QQZone mood records:', len(self.mood_details))
        print('Items that ultimately failed:')
        print('--------------')
        print('Moods:', len(self.error_mood_unikeys))
        print('Like details (including view counts):', len(self.error_like_detail_unikeys))
        print('Like name lists:', len(self.error_like_list_unikeys))
print('--------------')
print("########################")
|
test_order.py
|
import pytest
import grpc
import threading
from stub.test_pb2 import EchoRequest, Empty
from order_stub.order_pb2 import OrderCreateReq
@pytest.fixture(scope='module')
def grpc_add_to_server():
from stub.test_pb2_grpc import add_EchoServiceServicer_to_server
from order_stub.order_service_pb2_grpc import add_OrderServiceServicer_to_server
return add_OrderServiceServicer_to_server
@pytest.fixture(scope='module')
def grpc_servicer():
from order_servicer import Servicer
return Servicer()
@pytest.fixture(scope='module')
def grpc_stub(grpc_channel):
from stub.test_pb2_grpc import EchoServiceStub
from order_stub.order_service_pb2_grpc import OrderServiceStub
return OrderServiceStub(grpc.insecure_channel("localhost:9090"))
def test_some(grpc_stub):
request = OrderCreateReq()
response = grpc_stub.handler(request)
assert response.name == f'test-{request.name}'
def test_example(grpc_stub):
request = EchoRequest()
response = grpc_stub.error_handler(request)
print("1111")
assert response.name == f'test-{request.name}'
grpc_max_workers = 2
def test_blocking(grpc_stub):
stream = grpc_stub.blocking(Empty())
# after this call the servicer blocks its thread
def call_unblock():
# with grpc_max_workers = 1 this call could not be executed
grpc_stub.unblock(Empty())
grpc_stub.unblock(Empty())
t = threading.Thread(target=call_unblock)
t.start()
for resp in stream:
pass
t.join()
|
ri_vmware.py
|
import requests
import json
import os
import subprocess as sp
from threading import Thread
import socket
# https://www.youtube.com/watch?v=AsOm56jGNCE&ab_channel=ThePacketThrower
# openssl req -x509 -sha256 -nodes -newkey rsa:4096 -keyout vmware-key.pem -out vmware-crt.pem -days 365
class VmWare:
def __init__(self):
self.url = "https://127.0.0.1:8697/api/vms"
self.start_path = r'C:\Program Files (x86)\VMware\VMware Workstation\certs'
self.rest_app = r'C:\Program Files (x86)\VMware\VMware Workstation\vmrest.exe'
self.server = 'vmrest.exe'
self.payload = {}
self.headers = {
'Accept': 'application/vnd.vmware.vmw.rest-v1+json',
'Content-Type': 'application/vnd.vmware.vmw.rest-v1+json',
}
self.username = os.environ['VMWARE_USERNAME']
self.password = os.environ['VMWARE_PASSWORD']
self.server_process = None
self.port = 8697
self.start()
def server_is_running(self):
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
location = ("127.0.0.1", self.port)
result_of_check = a_socket.connect_ex(location)
a_socket.close()
if result_of_check == 0:
print('PORT IS OPEN')
return True # means port is open
else:
print('PORT IS CLOSE')
return False
def start_server(self):
sp.run(f'"{self.rest_app}" -c vmware-crt.pem -k vmware-key.pem', shell=True, capture_output=True, text=True)
def start(self):
os.chdir(self.start_path)
if not self.server_is_running():
print('STARTING SERVER')
self.server_process = Thread(target=self.start_server)
self.server_process.start()
def close(self):
os.system(f"taskkill /f /im {self.server}")
def get_vms_all(self):
response = requests.request("GET", self.url, headers=self.headers, auth=(self.username, self.password),
data=self.payload, verify=False)
return json.loads(response.content)
def get_vm_id(self, name):
vms = self.get_vms_all()
for vm_dict in vms:
if vm_dict['path'].split('\\')[-1] == name:
return vm_dict['id']
return None
def change_state(self, state, name):
""":key
state = on, off, suspended
valid operations = VM power operation: on, off, shutdown, suspend, pause, unpause
"""
states = {'on': "poweredOn", 'off': "poweredOff"}
vm_id = self.get_vm_id(name)
cur_state = self.power_state(name, vm_id=vm_id)
if cur_state:
if cur_state['power_state'] != states[state.lower()]:
response = requests.request("PUT", f'{self.url}/{vm_id}/power', headers=self.headers,
auth=(self.username, self.password),
data=state, verify=False)
return json.loads(response.content)
return {'msg': f'state = {states[state]}'}
def power_state(self, name, vm_id=None):
""":key
returns {"power_state": "poweredOff"} or {"power_state": "poweredOn"} or None
"""
def get_state(vm_id):
url = f'{self.url}/{vm_id}/power'
response = requests.request("GET", url, headers=self.headers, auth=(self.username, self.password),
data=self.payload, verify=False)
return json.loads(response.content)
if vm_id:
return get_state(vm_id)
else:
vm_id = self.get_vm_id(name)
if vm_id:
return get_state(vm_id)
return None
def ip_address(self, name, vm_id=None):
def get_ip(vm_id):
url = f'{self.url}/{vm_id}/ip'
response = requests.request("GET", url, headers=self.headers, auth=(self.username, self.password),
data=self.payload, verify=False)
return json.loads(response.content)
if vm_id:
return get_ip(vm_id)
else:
vm_id = self.get_vm_id(name)
if vm_id:
return get_ip(vm_id)
return None
# a = VmWare().get_vm_id('Ubuntu.vmx')
# print('id ->', a)
# obj = VmWare()
# a = obj.change_state(name='Ubuntu.vmx', state='off')
# print('state ->', a)
# obj.close()
# obj = VmWare()
# a = obj.ip_address(name='Ubuntu.vmx')
# print('ip ->', a)
# obj.close()
# r = requests.put('https://127.0.0.1:8697/api/vms/7A7K84LC1BARGPQFV3S3PHV3CLRVB2TB/power', headers=headers,
# verify=False, data='off')
#
# print(json.loads(r.content))
|
conftest.py
|
import pytest
import time
from context import HGECtx, HGECtxError, ActionsWebhookServer, EvtsWebhookServer, HGECtxGQLServer, GQLWsClient, PytestConf
import threading
import random
from datetime import datetime
import sys
import os
from collections import OrderedDict
def pytest_addoption(parser):
parser.addoption(
"--hge-urls",
metavar="HGE_URLS",
help="csv list of urls for graphql-engine",
required=False,
nargs='+'
)
parser.addoption(
"--pg-urls", metavar="PG_URLS",
help="csv list of urls for connecting to Postgres directly",
required=False,
nargs='+'
)
parser.addoption(
"--hge-key", metavar="HGE_KEY", help="admin secret key for graphql-engine", required=False
)
parser.addoption(
"--hge-webhook", metavar="HGE_WEBHOOK", help="url for graphql-engine's access control webhook", required=False
)
parser.addoption(
"--test-webhook-insecure", action="store_true",
help="Run Test cases for insecure https webhook"
)
parser.addoption(
"--test-webhook-request-context", action="store_true",
help="Run Test cases for testing webhook request context"
)
parser.addoption(
"--hge-jwt-key-file", metavar="HGE_JWT_KEY_FILE", help="File containting the private key used to encode jwt tokens using RS512 algorithm", required=False
)
parser.addoption(
"--hge-jwt-conf", metavar="HGE_JWT_CONF", help="The JWT conf", required=False
)
parser.addoption(
"--test-cors", action="store_true",
required=False,
help="Run testcases for CORS configuration"
)
parser.addoption(
"--test-ws-init-cookie",
metavar="read|noread",
required=False,
help="Run testcases for testing cookie sending over websockets"
)
parser.addoption(
"--test-metadata-disabled", action="store_true",
help="Run Test cases with metadata queries being disabled"
)
parser.addoption(
"--test-graphql-disabled", action="store_true",
help="Run Test cases with GraphQL queries being disabled"
)
parser.addoption(
"--test-hge-scale-url",
metavar="<url>",
required=False,
help="Run testcases for horizontal scaling"
)
parser.addoption(
"--test-allowlist-queries", action="store_true",
help="Run Test cases with allowlist queries enabled"
)
parser.addoption(
"--test-logging",
action="store_true",
default=False,
required=False,
help="Run testcases for logging"
)
parser.addoption(
"--test-function-permissions",
action="store_true",
required=False,
help="Run manual function permission tests"
)
parser.addoption(
"--test-jwk-url",
action="store_true",
default=False,
required=False,
help="Run testcases for JWK url behaviour"
)
parser.addoption(
"--accept",
action="store_true",
default=False,
required=False,
help="Accept any failing test cases from YAML files as correct, and write the new files out to disk."
)
parser.addoption(
"--skip-schema-teardown",
action="store_true",
default=False,
required=False,
help="""
Skip tearing down the schema/Hasura metadata after tests. This option may result in test failures if the schema
has to change between the list of tests to be run
"""
)
parser.addoption(
"--skip-schema-setup",
action="store_true",
default=False,
required=False,
help="""
Skip setting up schema/Hasura metadata before tests.
This option may result in test failures if the schema has to change between the list of tests to be run
"""
)
parser.addoption(
"--avoid-error-message-checks",
action="store_true",
default=False,
required=False,
help="""
This option when set will ignore disparity in error messages between expected and response outputs.
Used basically in version upgrade/downgrade tests where the error messages may change
"""
)
parser.addoption(
"--collect-upgrade-tests-to-file",
metavar="<path>",
required=False,
help="When used along with collect-only, it will write the list of upgrade tests into the file specified"
)
parser.addoption(
"--test-unauthorized-role",
action="store_true",
help="Run testcases for unauthorized role",
)
parser.addoption(
"--enable-remote-schema-permissions",
action="store_true",
default=False,
help="Flag to indicate if the graphql-engine has enabled remote schema permissions",
)
parser.addoption(
"--test-inherited-roles",
action="store_true",
default=False,
help="Flag to specify if the inherited roles tests are to be run"
)
parser.addoption(
"--redis-url",
metavar="REDIS_URL",
help="redis url for cache server",
default=False
)
parser.addoption(
"--backend",
help="run integration tests using a particular backend",
default="postgres"
)
# By default:
# 1) Set default parallelism to one
# 2) Group tests by filename (--dist=loadfile)
def pytest_cmdline_preparse(config, args):
worker = os.environ.get('PYTEST_XDIST_WORKER')
if 'xdist' in sys.modules and not worker: # pytest-xdist plugin
num = 1
args[:] = ["-n" + str(num),"--dist=loadfile"] + args
def pytest_configure(config):
# Pytest has removed the global pytest.config
# As a solution we are going to store it in PytestConf.config
PytestConf.config = config
if is_help_option_present(config):
return
if is_master(config):
if not config.getoption('--hge-urls'):
print("hge-urls should be specified")
if not config.getoption('--pg-urls'):
print("pg-urls should be specified")
config.hge_url_list = config.getoption('--hge-urls')
config.pg_url_list = config.getoption('--pg-urls')
config.hge_ctx_gql_server = HGECtxGQLServer(config.hge_url_list)
if config.getoption('-n', default=None):
xdist_threads = config.getoption('-n')
assert xdist_threads <= len(config.hge_url_list), "Not enough hge_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.hge_url_list))
assert xdist_threads <= len(config.pg_url_list), "Not enough pg_urls specified, Required " + str(xdist_threads) + ", got " + str(len(config.pg_url_list))
random.seed(datetime.now())
@pytest.hookimpl()
def pytest_report_collectionfinish(config, startdir, items):
"""
Collect server upgrade tests to the given file
"""
tests_file = config.getoption('--collect-upgrade-tests-to-file')
sep=''
tests=OrderedDict()
if tests_file:
def is_upgrade_test(item):
            # Check that the allow_server_upgrade_test marker is present
            # and the skip_server_upgrade_test marker is not present
return item.get_closest_marker('allow_server_upgrade_test') \
and not item.get_closest_marker('skip_server_upgrade_test')
with open(tests_file,'w') as f:
upgrade_items = filter(is_upgrade_test, items)
for item in upgrade_items:
# This test should be run separately,
# since its schema setup has function scope
if 'per_method_tests_db_state' in item.fixturenames:
tests[item.nodeid] = True
elif any([ (x in item.fixturenames)
for x in
[ 'per_class_tests_db_state',
'per_class_db_schema_for_mutation_tests'
]
]):
# For this test, schema setup has class scope
# We can run a class of these tests at a time
tests[item.parent.nodeid] = True
# Assume tests can only be run separately
else:
tests[item.nodeid] = True
for test in tests.keys():
f.write(test + '\n')
return ''
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(node):
if is_help_option_present(node.config):
return
# Pytest has removed the global pytest.config
node.slaveinput["hge-url"] = node.config.hge_url_list.pop()
node.slaveinput["pg-url"] = node.config.pg_url_list.pop()
def pytest_unconfigure(config):
if is_help_option_present(config):
return
config.hge_ctx_gql_server.teardown()
@pytest.fixture(scope='module')
def hge_ctx(request):
config = request.config
print("create hge_ctx")
if is_master(config):
hge_url = config.hge_url_list[0]
else:
hge_url = config.slaveinput["hge-url"]
if is_master(config):
pg_url = config.pg_url_list[0]
else:
pg_url = config.slaveinput["pg-url"]
try:
hge_ctx = HGECtx(hge_url, pg_url, config)
except HGECtxError as e:
assert False, "Error from hge_cxt: " + str(e)
# TODO this breaks things (https://github.com/pytest-dev/pytest-xdist/issues/86)
# so at least make sure the real error gets printed (above)
pytest.exit(str(e))
yield hge_ctx # provide the fixture value
print("teardown hge_ctx")
hge_ctx.teardown()
# TODO why do we sleep here?
time.sleep(1)
@pytest.fixture(scope='class')
def evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5592))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='module')
def actions_fixture(hge_ctx):
pg_version = hge_ctx.pg_version
if pg_version < 100000: # version less than 10.0
pytest.skip('Actions are not supported on Postgres version < 10')
# Start actions' webhook server
webhook_httpd = ActionsWebhookServer(hge_ctx, server_address=('127.0.0.1', 5593))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def functions_permissions_fixtures(hge_ctx):
if not hge_ctx.function_permissions:
pytest.skip('These tests are meant to be run with --test-function-permissions set')
return
@pytest.fixture(scope='class')
def inherited_role_fixtures(hge_ctx):
if not hge_ctx.inherited_roles_tests:
pytest.skip('These tests are meant to be run with --test-inherited-roles set')
return
@pytest.fixture(scope='class')
def scheduled_triggers_evts_webhook(request):
webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5594))
web_server = threading.Thread(target=webhook_httpd.serve_forever)
web_server.start()
yield webhook_httpd
webhook_httpd.shutdown()
webhook_httpd.server_close()
web_server.join()
@pytest.fixture(scope='class')
def gql_server(request, hge_ctx):
server = HGECtxGQLServer(request.config.getoption('--pg-urls'), 5991)
yield server
server.teardown()
@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
"""
This fixture provides an Apollo GraphQL websockets client
"""
client = GQLWsClient(hge_ctx, '/v1/graphql')
time.sleep(0.1)
yield client
client.teardown()
@pytest.fixture(scope='class')
def per_class_tests_db_state(request, hge_ctx):
"""
Set up the database state for select queries.
    Has class-level scope, since select queries do not change database state.
    Expects either a `dir()` method that provides the directory
    with `setup.yaml` and `teardown.yaml` files,
    or class variables `setup_files` and `teardown_files` that provide
    the list of setup and teardown files respectively.
"""
yield from db_state_context(request, hge_ctx)
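# Illustrative (hypothetical) test class using the fixture above; the directory
# name is made up, but the shape matches what the docstring describes:
#
#   @pytest.mark.usefixtures('per_class_tests_db_state')
#   class TestSelectQueries:
#       @classmethod
#       def dir(cls):
#           return 'queries/graphql_query/select'
#
# The fixture then applies <dir>/setup.yaml before the class's tests run and
# <dir>/teardown.yaml afterwards.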
@pytest.fixture(scope='class')
def per_class_tests_db_state_new(request, hge_ctx):
"""
Set up the database state for select queries.
    Has class-level scope, since select queries do not change database state.
    Expects either a `dir()` method that provides the directory
    with `setup.yaml` and `teardown.yaml` files,
    or class variables `setup_files` and `teardown_files` that provide
    the list of setup and teardown files respectively.
"""
print ("per_class_tests_db_state_new")
yield from db_state_context_new(request, hge_ctx)
@pytest.fixture(scope='function')
def per_method_tests_db_state(request, hge_ctx):
"""
This fixture sets up the database state for metadata operations
Has a function level scope, since metadata operations may change both the schema and data
Class method/variable requirements are similar to that of per_class_tests_db_state fixture
"""
yield from db_state_context(request, hge_ctx)
@pytest.fixture(scope='class')
def per_class_db_schema_for_mutation_tests(request, hge_ctx):
"""
This fixture sets up the database schema for mutations.
It has a class level scope, since mutations does not change schema.
Expects either `dir()` class method which provides the directory with `schema_setup.yaml` and `schema_teardown.yaml` files,
or variables `schema_setup_files` and `schema_teardown_files`
that provides the list of setup and teardown files respectively
"""
yield from db_context_with_schema_common(
request, hge_ctx, 'schema_setup_files', 'schema_setup.yaml', 'schema_teardown_files', 'schema_teardown.yaml', True
)
@pytest.fixture(scope='function')
def per_method_db_data_for_mutation_tests(request, hge_ctx, per_class_db_schema_for_mutation_tests):
"""
This fixture sets up the data for mutations.
Has a function level scope, since mutations may change data.
Having just the setup file(s), or the teardown file(s) is allowed.
Expects either `dir()` class method which provides the directory with `values_setup.yaml` and / or `values_teardown.yaml` files.
The class may provide `values_setup_files` variables which contains the list of data setup files,
Or the `values_teardown_files` variable which provides the list of data teardown files.
"""
yield from db_context_common(
request, hge_ctx, 'values_setup_files', 'values_setup.yaml',
'values_teardown_files', 'values_teardown.yaml',
False, False, False
)
@pytest.fixture(scope='function')
def per_backend_tests(hge_ctx, backend):
"""
This fixture ignores backend-specific tests unless the relevant --backend flag has been passed.
"""
# Currently, we default all tests to run on Postgres with or without a --backend flag.
# As our test suite develops, we may consider running backend-agnostic tests on all
# backends, unless a specific `--backend` flag is passed.
    if hge_ctx.backend != backend:
pytest.skip(
'Skipping test. Add --backend ' + backend + ' to run backend-specific tests'
)
return
def db_state_context(request, hge_ctx):
    # Tests for backends other than the default (Postgres) expect separate setup and
    # schema_setup files for v1/metadata and v2/query requests, respectively.
(setup, teardown, schema_setup, schema_teardown) = [
hge_ctx.backend_suffix(filename) + ".yaml"
for filename in ['setup', 'teardown', 'schema_setup', 'schema_teardown']
]
if hge_ctx.backend == 'postgres':
db_context = db_context_with_schema_common(
request, hge_ctx, 'setup_files', 'setup.yaml', 'teardown_files',
'teardown.yaml', True
)
else:
db_context = db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, True
)
yield from db_context
def db_state_context_new(
request, hge_ctx, setup='setup.yaml', teardown='teardown.yaml',
schema_setup='schema_setup.yaml', schema_teardown='schema_teardown.yaml'):
yield from db_context_with_schema_common_new (
request, hge_ctx, 'setup_files', setup, 'teardown_files',
teardown, schema_setup, schema_teardown, True
)
def db_context_with_schema_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_with_schema_common_new(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file, setup_sql_file, teardown_sql_file, check_file_exists=True):
(skip_setup, skip_teardown) = [
request.config.getoption('--' + x)
for x in ['skip-schema-setup', 'skip-schema-teardown']
]
yield from db_context_common_new (
request, hge_ctx, setup_files_attr, setup_default_file, setup_sql_file,
teardown_files_attr, teardown_default_file, teardown_sql_file,
check_file_exists, skip_setup, skip_teardown
)
def db_context_common(
request, hge_ctx, setup_files_attr, setup_default_file,
teardown_files_attr, teardown_default_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
yield from setup_and_teardown_v1q(request, hge_ctx, setup, teardown, check_file_exists, skip_setup, skip_teardown)
def db_context_common_new(
request, hge_ctx, setup_files_attr, setup_default_file,
setup_default_sql_file,
teardown_files_attr, teardown_default_file, teardown_default_sql_file,
check_file_exists=True, skip_setup=True, skip_teardown=True ):
def get_files(attr, default_file):
files = getattr(request.cls, attr, None)
if not files:
files = os.path.join(request.cls.dir(), default_file)
return files
setup = get_files(setup_files_attr, setup_default_file)
teardown = get_files(teardown_files_attr, teardown_default_file)
setup_default_sql_file = os.path.join(request.cls.dir(), setup_default_sql_file)
teardown_default_sql_file = os.path.join(request.cls.dir(), teardown_default_sql_file)
yield from setup_and_teardown(request, hge_ctx, setup, teardown,
setup_default_sql_file, teardown_default_sql_file, check_file_exists, skip_setup, skip_teardown)
def setup_and_teardown_v1q(request, hge_ctx, setup_files, teardown_files, check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files]:
run_on_elem_or_list(assert_file_exists, o)
def v1q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1q_f(f)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v1q_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(v1q_f, teardown_files)
def setup_and_teardown(request, hge_ctx, setup_files, teardown_files,
sql_schema_setup_file,sql_schema_teardown_file,
check_file_exists=True, skip_setup=False, skip_teardown=False):
def assert_file_exists(f):
assert os.path.isfile(f), 'Could not find file ' + f
if check_file_exists:
for o in [setup_files, teardown_files, sql_schema_setup_file, sql_schema_teardown_file]:
run_on_elem_or_list(assert_file_exists, o)
def v2q_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v2q_f(f)
assert st_code == 200, resp
def metadataq_f(f):
if os.path.isfile(f):
st_code, resp = hge_ctx.v1metadataq_f(f)
if st_code != 200:
# drop the sql setup, if the metadata calls fail
run_on_elem_or_list(v2q_f, sql_schema_teardown_file)
assert st_code == 200, resp
if not skip_setup:
run_on_elem_or_list(v2q_f, sql_schema_setup_file)
run_on_elem_or_list(metadataq_f, setup_files)
yield
# Teardown anyway if any of the tests have failed
if request.session.testsfailed > 0 or not skip_teardown:
run_on_elem_or_list(metadataq_f, teardown_files)
run_on_elem_or_list(v2q_f, sql_schema_teardown_file)
def run_on_elem_or_list(f, x):
if isinstance(x, str):
return [f(x)]
elif isinstance(x, list):
return [f(e) for e in x]
def is_help_option_present(config):
return any([
config.getoption(x)
for x in ['--fixtures','--help', '--collect-only']
])
def is_master(config):
"""True if the code running the given pytest.config object is running in a xdist master
node or not running xdist at all.
"""
return not hasattr(config, 'slaveinput')
use_inherited_roles_fixtures = pytest.mark.usefixtures(
"inherited_role_fixtures",
"per_class_tests_db_state_new"
)
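# Hedged usage sketch (not part of the original conftest): one way a test class might opt
# in to the class-scoped db-state fixture documented above. The class name and directory
# path below are assumptions for illustration only; real suites supply their own dir()
# method or *_files attributes. The leading underscore keeps pytest from collecting it.
@pytest.mark.usefixtures('per_class_tests_db_state')
class _ExampleSelectQueries:

    @classmethod
    def dir(cls):
        # Expected to contain setup.yaml and teardown.yaml
        return 'queries/graphql_query/example'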
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import logging
import threading
import tvm
from tvm import autotvm, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_runtime_codegen
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=3,
config={
"relay.backend.use_auto_scheduler": True,
"relay.backend.disable_compile_engine_cache": True,
},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
try:
opt_mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(opt_mod["main"])
except tvm.TVMError:
            print(
                "Got errors with GraphRuntimeCodegen for task extraction. "
                "Falling back to VMCompiler."
            )
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
compiler.lower(mod, target)
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod, params, target, target_host=None, hardware_params=None, include_simple_tasks=False
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
        The weight (i.e. the number of appearances) of each extracted task
"""
# pylint: disable=import-outside-toplevel
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
dispatch_ctx = DispatchContext.current
old_verbose = dispatch_ctx.verbose
dispatch_ctx.verbose = 0
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(target=call_all_topi_funcs, args=(mod, params, target))
build_thread.start()
build_thread.join()
dispatch_ctx.verbose = old_verbose
# create search tasks
tasks = []
weights = []
for wkl_key, weight in env.wkl_key_to_weight.items():
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
target_host=target_host,
hardware_params=hardware_params,
                # When the auto-scheduler is used on an end-to-end network, try to apply
                # layout rewrite to improve the overall performance
layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
)
)
weights.append(weight)
return tasks, weights
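# Hedged usage sketch (not part of the original module): one way extract_tasks might be
# called for an already-built Relay module. The "llvm" target string and the printed
# fields are assumptions for illustration only.
def _example_extract_tasks_usage(mod, params):
    """Illustrative only: extract auto_scheduler tasks and report how often each appears."""
    tasks, task_weights = extract_tasks(mod, params, target="llvm")
    for task, weight in zip(tasks, task_weights):
        print("task %s appears %d time(s) in the network" % (task.workload_key, weight))
    return tasks, task_weights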
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
EXTRACT_COMPLEX_TASK_ONLY = 1 # same as EXTRACT_TASK but ignore the task without complex ops
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.wkl_key_to_weight = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, workload_key):
"""Add the workload key of a search task
Parameters
----------
workload_key: str
The workload key of a task
"""
if workload_key not in self.wkl_key_to_weight:
self.wkl_key_to_weight[workload_key] = 0
self.wkl_key_to_weight[workload_key] += 1
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get input/output tensors and
other useful information.
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors with static shape
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
has_complex_op: bool
Whether the topi compute function includes at least one complex (reduce) op
"""
layout_free_ops = []
inputs = []
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
# We cannot directly add tensors to the set, because the comparison of
# two tensors with ndim=0 is ambiguous.
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body])
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in outs:
traverse(t)
io_tensors = inputs + list(outs)
for tensor in io_tensors:
# Reject the compute if any of its I/O tensors has dynamic shape.
if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]):
return ([], False, False)
return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.workload_key(), io_tensors)
target = tvm.target.Target.current()
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag)
schedule = None
env = TracingEnvironment.current
if env is None:
# in the final build mode
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(key)
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
if state is None:
return None
# rewrite the layout and update the context for the new dag
new_dag = dag.rewrite_layout_from_state(state)
new_key = new_dag.workload_key()
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
        The tensor whose indexing check should be removed.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
enabled: bool
Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
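# Hedged sketch (not part of the original module): is_auto_scheduler_enabled() only reads
# the current PassContext, so it reports True inside a context that sets the flag and
# False otherwise. The assertions about the surrounding code are illustrative assumptions.
def _example_auto_scheduler_flag():
    """Illustrative only: toggle the flag via a PassContext and observe the result."""
    outside = is_auto_scheduler_enabled()  # False unless an enclosing context set the flag
    with transform.PassContext(config={"relay.backend.use_auto_scheduler": True}):
        inside = is_auto_scheduler_enabled()  # True inside this context
    return outside, inside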
|
xapp_frame.py
|
# ==================================================================================
# Copyright (c) 2020 Nokia
# Copyright (c) 2020 AT&T Intellectual Property.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================
"""
This framework for Python Xapps provides classes that Xapp writers
should instantiate and/or subclass depending on their needs.
"""
import json
import os
import queue
from threading import Thread
import inotify_simple
from mdclogpy import Logger
from ricxappframe import xapp_rmr
from ricxappframe.rmr import rmr
from ricxappframe.xapp_sdl import SDLWrapper
# message-type constants
RIC_HEALTH_CHECK_REQ = 100
RIC_HEALTH_CHECK_RESP = 101
# environment variable with path to configuration file
CONFIG_FILE_ENV = "CONFIG_FILE"
class _BaseXapp:
"""
This class initializes RMR, starts a thread that checks for incoming
messages, provisions an SDL object and optionally creates a
config-file watcher. This private base class should not be
instantiated by clients directly, but it defines many public methods
that may be used by clients.
If environment variable CONFIG_FILE is defined, and that variable
contains a path to an existing file, a watcher is defined to monitor
modifications (writes) to that file using the Linux kernel's inotify
feature. The watcher must be polled by calling method
config_check().
Parameters
----------
rmr_port: int (optional, default is 4562)
Port on which the RMR library listens for incoming messages.
rmr_wait_for_ready: bool (optional, default is True)
If this is True, then init waits until RMR is ready to send,
which includes having a valid routing file. This can be set
to False if the client wants to *receive only*.
use_fake_sdl: bool (optional, default is False)
if this is True, it uses the DBaaS "fake dict backend" instead
of Redis or other backends. Set this to True when developing
an xapp or during unit testing to eliminate the need for DBaaS.
post_init: function (optional, default is None)
Runs this user-provided function at the end of the init method;
its signature should be post_init(self)
"""
def __init__(self, rmr_port=4562, rmr_wait_for_ready=True, use_fake_sdl=False, post_init=None):
"""
Documented in the class comment.
"""
# PUBLIC, can be used by xapps using self.(name):
self.logger = Logger(name=__name__)
# Start rmr rcv thread
self._rmr_loop = xapp_rmr.RmrLoop(port=rmr_port, wait_for_ready=rmr_wait_for_ready)
self._mrc = self._rmr_loop.mrc # for convenience
# SDL
self.sdl = SDLWrapper(use_fake_sdl)
# Config
# The environment variable specifies the path to the Xapp config file
self._config_path = os.environ.get(CONFIG_FILE_ENV, None)
if self._config_path and os.path.isfile(self._config_path):
self._inotify = inotify_simple.INotify()
self._inotify.add_watch(self._config_path, inotify_simple.flags.MODIFY)
self.logger.debug("__init__: watching config file {}".format(self._config_path))
else:
self._inotify = None
self.logger.warning("__init__: NOT watching any config file")
# run the optionally provided user post init
if post_init:
post_init(self)
# Public rmr methods
def rmr_get_messages(self):
"""
Returns a generator iterable over all items in the queue that
have not yet been read by the client xapp. Each item is a tuple
(S, sbuf) where S is a message summary dict and sbuf is the raw
message. The caller MUST call rmr.rmr_free_msg(sbuf) when
finished with each sbuf to prevent memory leaks!
"""
while not self._rmr_loop.rcv_queue.empty():
(summary, sbuf) = self._rmr_loop.rcv_queue.get()
yield (summary, sbuf)
def rmr_send(self, payload, mtype, retries=100):
"""
Allocates a buffer, sets payload and mtype, and sends
Parameters
----------
payload: bytes
payload to set
mtype: int
message type
retries: int (optional)
            Number of times to retry the send at the application level before giving up
            and returning False
Returns
-------
bool
whether or not the send worked after retries attempts
"""
sbuf = rmr.rmr_alloc_msg(vctx=self._mrc, size=len(payload), payload=payload, gen_transaction_id=True, mtype=mtype)
for _ in range(retries):
sbuf = rmr.rmr_send_msg(self._mrc, sbuf)
if sbuf.contents.state == 0:
self.rmr_free(sbuf)
return True
self.rmr_free(sbuf)
return False
def rmr_rts(self, sbuf, new_payload=None, new_mtype=None, retries=100):
"""
Allows the xapp to return to sender, possibly adjusting the
payload and message type before doing so. This does NOT free
the sbuf for the caller as the caller may wish to perform
multiple rts per buffer. The client needs to free.
Parameters
----------
sbuf: ctypes c_void_p
Pointer to an rmr message buffer
new_payload: bytes (optional)
New payload to set
new_mtype: int (optional)
New message type (replaces the received message)
retries: int (optional, default 100)
Number of times to retry at the application level
Returns
-------
bool
whether or not the send worked after retries attempts
"""
for _ in range(retries):
sbuf = rmr.rmr_rts_msg(self._mrc, sbuf, payload=new_payload, mtype=new_mtype)
if sbuf.contents.state == 0:
return True
self.logger.warning("RTS Failed! Summary: {}".format(rmr.message_summary(sbuf)))
return False
def rmr_free(self, sbuf):
"""
Frees an rmr message buffer after use
Note: this does not need to be a class method, self is not
used. However if we break it out as a function we need a home
for it.
Parameters
----------
sbuf: ctypes c_void_p
Pointer to an rmr message buffer
"""
rmr.rmr_free_msg(sbuf)
# Convenience (pass-thru) function for invoking SDL.
def sdl_set(self, namespace, key, value, usemsgpack=True):
"""
        ** Deprecation Warning **
        ** Will be removed in a future version **
Stores a key-value pair to SDL, optionally serializing the value
to bytes using msgpack.
Parameters
----------
namespace: string
SDL namespace
key: string
SDL key
value:
Object or byte array to store. See the `usemsgpack` parameter.
usemsgpack: boolean (optional, default is True)
Determines whether the value is serialized using msgpack before storing.
If usemsgpack is True, the msgpack function `packb` is invoked
on the value to yield a byte array that is then sent to SDL.
Stated differently, if usemsgpack is True, the value can be anything
that is serializable by msgpack.
If usemsgpack is False, the value must be bytes.
"""
self.sdl.set(namespace, key, value, usemsgpack)
def sdl_get(self, namespace, key, usemsgpack=True):
"""
        ** Deprecation Warning **
        ** Will be removed in a future version **
Gets the value for the specified namespace and key from SDL,
optionally deserializing stored bytes using msgpack.
Parameters
----------
namespace: string
SDL namespace
key: string
SDL key
usemsgpack: boolean (optional, default is True)
If usemsgpack is True, the byte array stored by SDL is deserialized
using msgpack to yield the original object that was stored.
If usemsgpack is False, the byte array stored by SDL is returned
without further processing.
Returns
-------
Value
See the usemsgpack parameter for an explanation of the returned value type.
Answers None if the key is not found.
"""
return self.sdl.get(namespace, key, usemsgpack)
def sdl_find_and_get(self, namespace, prefix, usemsgpack=True):
"""
        ** Deprecation Warning **
        ** Will be removed in a future version **
Gets all key-value pairs in the specified namespace
with keys that start with the specified prefix,
optionally deserializing stored bytes using msgpack.
Parameters
----------
        namespace: string
SDL namespace
prefix: string
the key prefix
usemsgpack: boolean (optional, default is True)
If usemsgpack is True, the byte array stored by SDL is deserialized
using msgpack to yield the original value that was stored.
If usemsgpack is False, the byte array stored by SDL is returned
without further processing.
Returns
-------
Dictionary of key-value pairs
Each key has the specified prefix.
The value object (its type) depends on the usemsgpack parameter,
but is either a Python object or raw bytes as discussed above.
Answers an empty dictionary if no keys matched the prefix.
"""
return self.sdl.find_and_get(namespace, prefix, usemsgpack)
def sdl_delete(self, namespace, key):
"""
        ** Deprecation Warning **
        ** Will be removed in a future version **
Deletes the key-value pair with the specified key in the specified namespace.
Parameters
----------
namespace: string
SDL namespace
key: string
SDL key
"""
self.sdl.delete(namespace, key)
# Health
def healthcheck(self):
"""
this needs to be understood how this is supposed to work
"""
return self._rmr_loop.healthcheck() and self.sdl.healthcheck()
# Convenience function for discovering config change events
def config_check(self, timeout=0):
"""
Checks the watcher for configuration-file events. The watcher
prerequisites and event mask are documented in __init__().
Parameters
----------
timeout: int (optional)
Number of seconds to wait for a configuration-file event, default 0.
Returns
-------
List of Events, possibly empty
An event is a tuple with objects wd, mask, cookie and name.
For example::
Event(wd=1, mask=1073742080, cookie=0, name='foo')
"""
if not self._inotify:
return []
events = self._inotify.read(timeout=timeout)
return list(events)
def stop(self):
"""
        Cleans up and stops the xapp's RMR receive thread. This is
        critical for unit testing, as pytest will never return if the
        thread is still running.
        TODO: can we register a ctrl-c handler so this gets called on
        ctrl-c? Currently two ctrl-c presses are needed to stop.
"""
self._rmr_loop.stop()
# Public classes that Xapp writers should instantiate or subclass
# to implement an Xapp.
class RMRXapp(_BaseXapp):
"""
Represents an Xapp that reacts only to RMR messages; i.e., the Xapp
only performs an action when a message is received. Clients should
invoke the run method, which has a loop that waits for RMR messages
and calls the appropriate client-registered consume callback on each.
If environment variable CONFIG_FILE is defined, and that variable
contains a path to an existing file, this class polls a watcher
defined on that file to detect file-write events, and invokes a
configuration-change handler on each event. The handler is also
invoked at startup. If no handler function is supplied to the
constructor, this class defines a default handler that only logs a
message.
Parameters
----------
default_handler: function
A function with the signature (summary, sbuf) to be called when a
message type is received for which no other handler is registered.
default_handler argument summary: dict
The RMR message summary, a dict of key-value pairs
default_handler argument sbuf: ctypes c_void_p
Pointer to an RMR message buffer. The user must call free on this when done.
config_handler: function (optional, default is documented above)
A function with the signature (json) to be called at startup and each time
a configuration-file change event is detected. The JSON object is read from
the configuration file, if the prerequisites are met.
config_handler argument json: dict
The contents of the configuration file, parsed as JSON.
rmr_port: integer (optional, default is 4562)
Initialize RMR to listen on this port
rmr_wait_for_ready: boolean (optional, default is True)
Wait for RMR to signal ready before starting the dispatch loop
use_fake_sdl: boolean (optional, default is False)
Use an in-memory store instead of the real SDL service
post_init: function (optional, default None)
Run this function after the app initializes and before the dispatch loop starts;
its signature should be post_init(self)
"""
def __init__(self, default_handler, config_handler=None, rmr_port=4562, rmr_wait_for_ready=True, use_fake_sdl=False, post_init=None):
"""
Also see _BaseXapp
"""
# init base
super().__init__(
rmr_port=rmr_port, rmr_wait_for_ready=rmr_wait_for_ready, use_fake_sdl=use_fake_sdl, post_init=post_init
)
# setup callbacks
self._default_handler = default_handler
self._config_handler = config_handler
self._dispatch = {}
# used for thread control
self._keep_going = True
# register a default healthcheck handler
# this default checks that rmr is working and SDL is working
# the user can override this and register their own handler
# if they wish since the "last registered callback wins".
def handle_healthcheck(self, summary, sbuf):
healthy = self.healthcheck()
payload = b"OK\n" if healthy else b"ERROR [RMR or SDL is unhealthy]\n"
self.rmr_rts(sbuf, new_payload=payload, new_mtype=RIC_HEALTH_CHECK_RESP)
self.rmr_free(sbuf)
self.register_callback(handle_healthcheck, RIC_HEALTH_CHECK_REQ)
# define a default configuration-change handler if none was provided.
if not config_handler:
def handle_config_change(self, config):
self.logger.debug("xapp_frame: default config handler invoked")
self._config_handler = handle_config_change
# call the config handler at startup if prereqs were met
if self._inotify:
with open(self._config_path) as json_file:
data = json.load(json_file)
self.logger.debug("run: invoking config handler at start")
self._config_handler(self, data)
def register_callback(self, handler, message_type):
"""
registers this xapp to call handler(summary, buf) when an rmr message is received of type message_type
Parameters
----------
handler: function
a function with the signature (summary, sbuf) to be called
when a message of type message_type is received
summary: dict
the rmr message summary
sbuf: ctypes c_void_p
Pointer to an rmr message buffer. The user must call free on this when done.
        message_type: int
the message type to look for
Note if this method is called multiple times for a single message type, the "last one wins".
"""
self._dispatch[message_type] = handler
def run(self, thread=False, rmr_timeout=5, inotify_timeout=0):
"""
This function should be called when the reactive Xapp is ready to start.
After start, the Xapp's handlers will be called on received messages.
Parameters
----------
thread: bool (optional, default is False)
If False, execution is not returned and the framework loops forever.
If True, a thread is started to run the queue read/dispatch loop
and execution is returned to caller; the thread can be stopped
by calling the .stop() method.
rmr_timeout: integer (optional, default is 5 seconds)
Length of time to wait for an RMR message to arrive.
inotify_timeout: integer (optional, default is 0 seconds)
Length of time to wait for an inotify event to arrive.
"""
def loop():
while self._keep_going:
# poll RMR
try:
(summary, sbuf) = self._rmr_loop.rcv_queue.get(block=True, timeout=rmr_timeout)
# dispatch
func = self._dispatch.get(summary[rmr.RMR_MS_MSG_TYPE], None)
if not func:
func = self._default_handler
self.logger.debug("run: invoking msg handler on type {}".format(summary[rmr.RMR_MS_MSG_TYPE]))
func(self, summary, sbuf)
except queue.Empty:
# the get timed out
pass
# poll configuration file watcher
try:
events = self.config_check(timeout=inotify_timeout)
for event in events:
with open(self._config_path) as json_file:
data = json.load(json_file)
self.logger.debug("run: invoking config handler on change event {}".format(event))
self._config_handler(self, data)
except Exception as error:
self.logger.error("run: configuration handler failed: {}".format(error))
if thread:
Thread(target=loop).start()
else:
loop()
def stop(self):
"""
Sets the flag to end the dispatch loop.
"""
super().stop()
self.logger.debug("Setting flag to end framework work loop.")
self._keep_going = False
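# Hedged usage sketch (not part of the original framework): a minimal reactive xapp.
# The message type 30000, the fake SDL backend and the handler bodies are assumptions
# for illustration only.
def _example_rmr_xapp():
    """Illustrative only: echo one registered message type back to its sender."""
    def default_handler(xapp, summary, sbuf):
        xapp.logger.debug("unhandled message type {}".format(summary[rmr.RMR_MS_MSG_TYPE]))
        xapp.rmr_free(sbuf)

    def echo_handler(xapp, summary, sbuf):
        xapp.rmr_rts(sbuf)
        xapp.rmr_free(sbuf)

    xapp = RMRXapp(default_handler, use_fake_sdl=True)
    xapp.register_callback(echo_handler, 30000)
    xapp.run(thread=True)  # returns immediately; call xapp.stop() to end the loop
    return xapp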
class Xapp(_BaseXapp):
"""
Represents a generic Xapp where the client provides a single function
for the framework to call at startup time (instead of providing callback
functions by message type). The Xapp writer must implement and provide a
function with a loop-forever construct similar to the `run` function in
the `RMRXapp` class. That function should poll to retrieve RMR messages
and dispatch them appropriately, poll for configuration changes, etc.
Parameters
----------
entrypoint: function
This function is called when the Xapp class's run method is invoked.
The function signature must be just function(self)
rmr_port: integer (optional, default is 4562)
Initialize RMR to listen on this port
rmr_wait_for_ready: boolean (optional, default is True)
Wait for RMR to signal ready before starting the dispatch loop
use_fake_sdl: boolean (optional, default is False)
Use an in-memory store instead of the real SDL service
"""
def __init__(self, entrypoint, rmr_port=4562, rmr_wait_for_ready=True, use_fake_sdl=False):
"""
Parameters
----------
For the other parameters, see class _BaseXapp.
"""
# init base
super().__init__(rmr_port=rmr_port, rmr_wait_for_ready=rmr_wait_for_ready, use_fake_sdl=use_fake_sdl)
self._entrypoint = entrypoint
def run(self):
"""
This function should be called when the general Xapp is ready to start.
"""
self._entrypoint(self)
# there is no need for stop currently here (base has, and nothing
# special to do here)
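# Hedged usage sketch (not part of the original framework): a general xapp whose
# entrypoint drains the receive queue once and then stops. The payload, message type
# and fake SDL backend are assumptions for illustration only.
def _example_general_xapp():
    """Illustrative only: send one message, log anything received, then shut down."""
    def entry(xapp):
        xapp.rmr_send(b"ping", 30000)
        for summary, sbuf in xapp.rmr_get_messages():
            xapp.logger.debug("received message type {}".format(summary[rmr.RMR_MS_MSG_TYPE]))
            xapp.rmr_free(sbuf)
        xapp.stop()

    Xapp(entrypoint=entry, use_fake_sdl=True).run()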
|
classification.py
|
from multiprocessing import Process
import matplotlib.pyplot as plt
import show_slices as sl
import tensorflow.compat.v1 as tf  # TF1-style graph API (placeholder/Session) used throughout
import nibabel as nib
import numpy as np
import math
import cv2
tf.disable_eager_execution()
def calc():
    # Flattened feature count fed to the dense layer after two 2x2x2 max-pools with
    # SAME padding on a 50x50x30 volume with 64 channels: 50 -> 25 -> 13, 30 -> 15 -> 8,
    # giving 13 * 13 * 8 * 64 features.
    img_px = math.ceil(50/4)
    slice_ct = math.ceil(30/4)
    return img_px * img_px * slice_ct * 64
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i: i + n]
def mean(l):
return sum(l)/len(l)
def apply_histogram(img_n):
img_n = np.array(img_n)
newImg = []
for i in img_n:
img = cv2.normalize(src=i, dst=None, alpha=0, beta=80,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
equ = cv2.equalizeHist(img)
newImg.append(equ.tolist())
return newImg
def process_data(path, apply_hist=True):
img = nib.load(path)
slices = img.get_fdata()
new_slices = []
slices = [cv2.resize(np.array(each_slice), (50, 50)) for each_slice in slices]
chunk_sizes = math.ceil(len(slices) / 100)
for slice_chunk in chunks(slices, chunk_sizes):
slice_chunk = list(map(mean, zip(*slice_chunk)))
new_slices.append(slice_chunk)
new_slices = new_slices[30:60]
if apply_hist:
new_slices = apply_histogram(new_slices)
newFile = nib.Nifti1Image(np.array(new_slices), img.affine)
nib.save(newFile, '/tmp/img_final.nii')
    p = Process(target=sl.show_slices, args=('/tmp/img_final.nii', 'Image after histogram equalization'))
p.start()
return np.array(new_slices)
def conv3d(x, W):
return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')
def maxpool3d(x):
return tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
def convolutional_neural_network(x):
number = calc()
weights = {'W_conv1': tf.Variable(tf.random_normal([3, 3, 3, 1, 32])),
'W_conv2': tf.Variable(tf.random_normal([3, 3, 3, 32, 64])),
'W_fc': tf.Variable(tf.random_normal([number, 1024])),
'out': tf.Variable(tf.random_normal([1024, 3]))}
biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
'b_conv2': tf.Variable(tf.random_normal([64])),
'b_fc': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([3]))}
x = tf.reshape(x, shape=[-1, 50, 50, 30, 1])
conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])
conv1 = maxpool3d(conv1)
conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
conv2 = maxpool3d(conv2)
fc = tf.reshape(conv2, [-1, number])
fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
fc = tf.nn.dropout(fc, 1)
output = tf.matmul(fc, weights['out']) + biases['out']
return output
def classification(path):
x = tf.placeholder('float')
X_new = process_data(path=path, apply_hist=True)
pred = convolutional_neural_network(x)
res = 0
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph('modelo.meta')
saver.restore(sess, 'modelo')
probabilities = tf.nn.softmax(pred)
c = sess.run(probabilities, feed_dict={x: X_new})
res = np.argmax(c)
return res
# print(classification('/tmp/A65COG_I31102.nii'))
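# Hedged usage sketch (not part of the original script): the NIfTI path below is a
# placeholder and assumes a trained checkpoint ('modelo.meta' / 'modelo') is present in
# the working directory, as required by classification() above.
def _example_classify(nii_path='/tmp/example_scan.nii'):
    """Illustrative only: run the full pipeline on one scan and print the class index."""
    predicted_class = classification(nii_path)
    print('predicted class index (argmax over 3 outputs):', predicted_class)
    return predicted_class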
|